aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/DocBook/gpu.tmpl943
-rw-r--r--Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt54
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_dp.txt41
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi.txt12
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp.txt26
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt20
-rw-r--r--Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt22
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt60
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/ti-edma.txt10
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt4
-rw-r--r--Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt2
-rw-r--r--Documentation/devicetree/bindings/media/exynos5-gsc.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt7
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt4
-rw-r--r--Documentation/networking/e100.txt14
-rw-r--r--MAINTAINERS29
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi1
-rw-r--r--arch/arc/boot/dts/nsim_hs.dts3
-rw-r--r--arch/arc/include/asm/mach_desc.h4
-rw-r--r--arch/arc/include/asm/smp.h4
-rw-r--r--arch/arc/include/asm/unwind.h4
-rw-r--r--arch/arc/kernel/intc-arcv2.c15
-rw-r--r--arch/arc/kernel/irq.c33
-rw-r--r--arch/arc/kernel/mcip.c2
-rw-r--r--arch/arc/kernel/perf_event.c32
-rw-r--r--arch/arc/kernel/setup.c1
-rw-r--r--arch/arc/kernel/smp.c8
-rw-r--r--arch/arc/kernel/unwind.c53
-rw-r--r--arch/arc/mm/init.c4
-rw-r--r--arch/arm/boot/dts/am4372.dtsi4
-rw-r--r--arch/arm/boot/dts/am43xx-clocks.dtsi8
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts1
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi8
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi8
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts15
-rw-r--r--arch/arm/boot/dts/vf610-colibri.dtsi5
-rw-r--r--arch/arm/boot/dts/vf610.dtsi2
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi6
-rw-r--r--arch/arm/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm/include/asm/uaccess.h4
-rw-r--r--arch/arm/kernel/process.c33
-rw-r--r--arch/arm/kernel/swp_emulate.c6
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c29
-rw-r--r--arch/arm/mach-at91/Kconfig6
-rw-r--r--arch/arm/mach-at91/pm.c7
-rw-r--r--arch/arm/mach-exynos/pmu.c6
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h12
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-pxa/ezx.c5
-rw-r--r--arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c2
-rw-r--r--arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c2
-rw-r--r--arch/arm/mm/context.c38
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/init.c92
-rw-r--r--arch/arm/mm/proc-v7.S4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi5
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h12
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S5
-rw-r--r--arch/blackfin/kernel/perf_event.c2
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/microblaze/kernel/dma.c3
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/parisc/include/asm/pgtable.h3
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/pci.c18
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/boot/dts/sbc8641d.dts8
-rw-r--r--arch/powerpc/include/asm/systbl.h24
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h12
-rw-r--r--arch/powerpc/kernel/eeh_driver.c14
-rw-r--r--arch/powerpc/kvm/book3s_hv.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c64
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/s390/kernel/dis.c17
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h2
-rw-r--r--arch/sh/kernel/perf_event.c2
-rw-r--r--arch/sparc/kernel/perf_event.c2
-rw-r--r--arch/tile/kernel/perf_event.c2
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/drivers/net_user.c10
-rw-r--r--arch/um/kernel/signal.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.h5
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c4
-rw-r--r--arch/x86/kernel/irq_work.c2
-rw-r--r--arch/x86/kvm/cpuid.h8
-rw-r--r--arch/x86/kvm/mtrr.c25
-rw-r--r--arch/x86/kvm/svm.c4
-rw-r--r--arch/x86/kvm/vmx.c7
-rw-r--r--arch/x86/kvm/x86.c12
-rw-r--r--arch/x86/mm/dump_pagetables.c2
-rw-r--r--arch/x86/um/signal.c18
-rw-r--r--arch/x86/xen/mmu.c9
-rw-r--r--arch/x86/xen/suspend.c20
-rw-r--r--block/blk-cgroup.c6
-rw-r--r--block/blk-core.c16
-rw-r--r--crypto/ablkcipher.c2
-rw-r--r--crypto/blkcipher.c2
-rw-r--r--drivers/acpi/nfit.c2
-rw-r--r--drivers/ata/ahci.c22
-rw-r--r--drivers/ata/ahci_mvebu.c5
-rw-r--r--drivers/ata/libahci.c9
-rw-r--r--drivers/ata/libata-eh.c8
-rw-r--r--drivers/ata/sata_fsl.c3
-rw-r--r--drivers/ata/sata_sil.c3
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/power/domain.c33
-rw-r--r--drivers/block/null_blk.c38
-rw-r--r--drivers/block/xen-blkback/blkback.c15
-rw-r--r--drivers/block/xen-blkback/common.h8
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c8
-rw-r--r--drivers/clk/clk-gpio.c33
-rw-r--r--drivers/clk/clk-qoriq.c4
-rw-r--r--drivers/clk/clk-scpi.c1
-rw-r--r--drivers/clk/imx/clk-pllv1.c14
-rw-r--r--drivers/clk/imx/clk-pllv2.c9
-rw-r--r--drivers/clk/imx/clk-vf610.c8
-rw-r--r--drivers/clk/mmp/clk-mmp2.c1
-rw-r--r--drivers/clk/mmp/clk-pxa168.c1
-rw-r--r--drivers/clk/mmp/clk-pxa910.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-pll2.c23
-rw-r--r--drivers/clk/ti/clk-816x.c2
-rw-r--r--drivers/clk/ti/clkt_dpll.c4
-rw-r--r--drivers/clk/ti/divider.c16
-rw-r--r--drivers/clk/ti/fapll.c4
-rw-r--r--drivers/clk/ti/mux.c15
-rw-r--r--drivers/clocksource/mmio.c2
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpufreq/intel_pstate.c2
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c2
-rw-r--r--drivers/dma/at_xdmac.c9
-rw-r--r--drivers/dma/bcm2835-dma.c78
-rw-r--r--drivers/dma/edma.c53
-rw-r--r--drivers/dma/mic_x100_dma.c15
-rw-r--r--drivers/fpga/fpga-mgr.c13
-rw-r--r--drivers/gpio/gpio-ath79.c2
-rw-r--r--drivers/gpio/gpio-generic.c4
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c328
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c163
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c235
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c317
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c273
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smum.h (renamed from drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c2987
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c129
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h198
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smum.h (renamed from drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c261
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c238
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c153
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h)61
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h50
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie_helpers.h141
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h21
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h13
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h79
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h124
-rw-r--r--drivers/gpu/drm/amd/powerplay/Kconfig6
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile22
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c660
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/Makefile11
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c289
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h34
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c215
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h59
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c114
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c410
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h100
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c438
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h88
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/psm.c117
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/psm.h38
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile15
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c252
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h37
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c1737
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h326
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c114
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h35
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h105
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c5127
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h361
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c553
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h66
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c687
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c155
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c334
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c563
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h105
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c76
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c1207
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h246
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h612
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c64
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c1688
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h47
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c350
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h107
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c6075
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h408
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h66
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h406
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c1142
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h35
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c590
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h61
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h299
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h (renamed from drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h)1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmanager.h109
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmgr.h125
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h412
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h10299
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h385
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h801
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/power_state.h200
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h28
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h47
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_debug.h47
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_feature.h67
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_instance.h39
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h36
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h46
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7.h (renamed from drivers/gpu/drm/amd/amdgpu/smu7.h)0
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72.h664
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h760
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu73.h720
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h799
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h (renamed from drivers/gpu/drm/amd/amdgpu/smu7_discrete.h)0
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h (renamed from drivers/gpu/drm/amd/amdgpu/smu7_fusion.h)0
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu8.h (renamed from drivers/gpu/drm/amd/amdgpu/smu8.h)0
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h (renamed from drivers/gpu/drm/amd/amdgpu/smu8_fusion.h)8
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h (renamed from drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h)0
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h100
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h182
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h420
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile9
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c858
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h102
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c1042
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h77
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c263
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c819
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h53
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c27
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h9
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c15
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c4
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h3
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c1
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c29
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c9
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c10
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c (renamed from drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c)2
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi-audio.h (renamed from drivers/gpu/drm/bridge/dw_hdmi-audio.h)0
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c (renamed from drivers/gpu/drm/bridge/dw_hdmi.c)33
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.h (renamed from drivers/gpu/drm/bridge/dw_hdmi.h)0
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c6
-rw-r--r--drivers/gpu/drm/drm_atomic.c135
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c126
-rw-r--r--drivers/gpu/drm/drm_bridge.c69
-rw-r--r--drivers/gpu/drm/drm_crtc.c109
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c93
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c145
-rw-r--r--drivers/gpu/drm/drm_drv.c30
-rw-r--r--drivers/gpu/drm/drm_edid.c94
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem.c109
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c14
-rw-r--r--drivers/gpu/drm/drm_irq.c3
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c38
-rw-r--r--drivers/gpu/drm/drm_modes.c83
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c14
-rw-r--r--drivers/gpu/drm/drm_pci.c20
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c9
-rw-r--r--drivers/gpu/drm/drm_prime.c16
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c130
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig20
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile14
-rw-r--r--drivers/gpu/drm/etnaviv/cmdstream.xml.h218
-rw-r--r--drivers/gpu/drm/etnaviv/common.xml.h249
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c268
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c209
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c707
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h161
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c227
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.h54
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c899
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h117
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c122
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c443
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c1647
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h209
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c240
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.h28
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c33
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h25
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c299
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h71
-rw-r--r--drivers/gpu/drm/etnaviv/state.xml.h351
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h407
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c124
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c179
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c189
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c81
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h97
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c161
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c160
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c259
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c41
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c59
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c384
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h4
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c7
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c12
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/power.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c22
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c8
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c2
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c29
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h48
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c31
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rwxr-xr-xdrivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c3
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c2
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c6
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c26
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c10
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c3
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c12
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c6
-rw-r--r--drivers/gpu/drm/msm/Kconfig8
-rw-r--r--drivers/gpu/drm/msm/Makefile3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c52
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c35
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c508
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c195
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h10
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c533
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c87
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c198
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c278
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h15
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c23
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c20
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c129
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c76
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h7
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c43
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0002.h66
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0046.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl006b.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506e.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506f.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h99
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507a.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507b.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507c.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507d.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507e.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826e.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826f.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl906f.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl9097.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h701
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0001.h46
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0004.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0005.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/unpack.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c224
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c197
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c25
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c30
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h332
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c116
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c192
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c92
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c228
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/pcie.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h1366
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h1284
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h338
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h1516
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c2
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c15
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c44
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h17
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c304
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c55
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c804
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h26
-rw-r--r--drivers/gpu/drm/panel/Kconfig19
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c334
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c387
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c123
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/Kconfig9
-rw-r--r--drivers/gpu/drm/radeon/Makefile4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c108
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c30
-rw-r--r--drivers/gpu/drm/radeon/cik.c10
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c16
-rw-r--r--drivers/gpu/drm/radeon/drm_buffer.c177
-rw-r--r--drivers/gpu/drm/radeon/drm_buffer.h148
-rw-r--r--drivers/gpu/drm/radeon/r100.c5
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c1186
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c874
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2660
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c95
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2243
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c97
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h2048
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c402
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c302
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c3261
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c100
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c12
-rw-r--r--drivers/gpu/drm/radeon/sid.h5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig10
-rw-r--r--drivers/gpu/drm/rockchip/Makefile4
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c1194
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c25
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c125
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c1208
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h230
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c316
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h169
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c1
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c7
-rw-r--r--drivers/gpu/drm/tegra/dc.c15
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c41
-rw-r--r--drivers/gpu/drm/tegra/drm.c90
-rw-r--r--drivers/gpu/drm/tegra/drm.h27
-rw-r--r--drivers/gpu/drm/tegra/dsi.c15
-rw-r--r--drivers/gpu/drm/tegra/fb.c26
-rw-r--r--drivers/gpu/drm/tegra/gem.c15
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/rgb.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c147
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c3
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/vc4/Makefile11
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c517
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c103
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h318
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c866
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c210
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c149
-rw-r--r--drivers/gpu/drm/vc4/vc4_packet.h399
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c42
-rw-r--r--drivers/gpu/drm/vc4/vc4_qpu_defines.h264
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c634
-rw-r--r--drivers/gpu/drm/vc4/vc4_trace.h63
-rw-r--r--drivers/gpu/drm/vc4/vc4_trace_points.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c262
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c900
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c513
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c18
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c92
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c9
-rw-r--r--drivers/gpu/host1x/Makefile3
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/dev.c33
-rw-r--r--drivers/gpu/host1x/hw/host1x05.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x05.h26
-rw-r--r--drivers/gpu/host1x/hw/host1x05_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x05_channel.h121
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x05_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x05_uclass.h181
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c6
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c9
-rw-r--r--drivers/hwmon/Kconfig1
-rw-r--r--drivers/hwmon/tmp102.c16
-rw-r--r--drivers/i2c/busses/i2c-davinci.c11
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c6
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c27
-rw-r--r--drivers/i2c/busses/i2c-rcar.c4
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c4
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/industrialio-core.c2
-rw-r--r--drivers/iio/light/apds9960.c1
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c6
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/mad.c5
-rw-r--r--drivers/infiniband/core/sa_query.c32
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c27
-rw-r--r--drivers/infiniband/core/verbs.c43
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c19
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c11
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c13
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c48
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h5
-rw-r--r--drivers/input/joystick/db9.c1
-rw-r--r--drivers/input/joystick/gamecon.c1
-rw-r--r--drivers/input/joystick/turbografx.c1
-rw-r--r--drivers/input/joystick/walkera0701.c1
-rw-r--r--drivers/input/misc/arizona-haptics.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c3
-rw-r--r--drivers/input/serio/parkbd.c1
-rw-r--r--drivers/input/tablet/aiptek.c9
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c34
-rw-r--r--drivers/input/touchscreen/elants_i2c.c21
-rw-r--r--drivers/iommu/amd_iommu_v2.c20
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/intel-svm.c20
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c5
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c23
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c7
-rw-r--r--drivers/lightnvm/Kconfig1
-rw-r--r--drivers/lightnvm/core.c85
-rw-r--r--drivers/lightnvm/gennvm.c20
-rw-r--r--drivers/lightnvm/rrpc.c25
-rw-r--r--drivers/md/dm-thin-metadata.c34
-rw-r--r--drivers/md/md.c22
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/persistent-data/dm-btree.c101
-rw-r--r--drivers/md/persistent-data/dm-btree.h14
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c32
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c4
-rw-r--r--drivers/media/usb/airspy/airspy.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c13
-rw-r--r--drivers/misc/cxl/native.c2
-rw-r--r--drivers/mtd/ofpart.c12
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c38
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c7
-rw-r--r--drivers/net/ethernet/aurora/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c39
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c30
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c8
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c49
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c11
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c56
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c5
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c5
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c5
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c55
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h25
-rw-r--r--drivers/net/ethernet/sfc/ef10.c24
-rw-r--r--drivers/net/ethernet/sfc/efx.h5
-rw-r--r--drivers/net/ethernet/sfc/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/phy/mdio-mux.c7
-rw-r--r--drivers/net/phy/micrel.c13
-rw-r--r--drivers/net/ppp/pppoe.c14
-rw-r--r--drivers/net/ppp/pptp.c6
-rw-r--r--drivers/net/usb/cdc_mbim.c26
-rw-r--r--drivers/net/usb/cdc_ncm.c10
-rw-r--r--drivers/net/usb/r8152.c21
-rw-r--r--drivers/net/virtio_net.c34
-rw-r--r--drivers/net/vxlan.c75
-rw-r--r--drivers/net/xen-netback/netback.c34
-rw-r--r--drivers/nvme/host/lightnvm.c26
-rw-r--r--drivers/nvme/host/pci.c20
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/of/irq.c3
-rw-r--r--drivers/of/of_reserved_mem.c8
-rw-r--r--drivers/parisc/iommu-helpers.h15
-rw-r--r--drivers/pci/host/pcie-altera.c23
-rw-r--r--drivers/pci/msi.c4
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/phy-bcm-cygnus-pcie.c16
-rw-r--r--drivers/phy/phy-berlin-sata.c20
-rw-r--r--drivers/phy/phy-brcmstb-sata.c17
-rw-r--r--drivers/phy/phy-core.c21
-rw-r--r--drivers/phy/phy-miphy28lp.c16
-rw-r--r--drivers/phy/phy-miphy365x.c16
-rw-r--r--drivers/phy/phy-mt65xx-usb3.c20
-rw-r--r--drivers/phy/phy-rockchip-usb.c17
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c13
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c41
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h3
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c1
-rw-r--r--drivers/platform/x86/apple-gmux.c113
-rw-r--r--drivers/powercap/intel_rapl.c7
-rw-r--r--drivers/rtc/rtc-da9063.c19
-rw-r--r--drivers/rtc/rtc-rk808.c48
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/virtio/virtio_ccw.c62
-rw-r--r--drivers/scsi/scsi_pm.c20
-rw-r--r--drivers/scsi/ses.c30
-rw-r--r--drivers/spi/spi-fsl-dspi.c12
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c4
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c2
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c20
-rw-r--r--drivers/tty/n_tty.c22
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c8
-rw-r--r--drivers/tty/serial/earlycon.c2
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/tty_buffer.c2
-rw-r--r--drivers/usb/class/cdc-acm.c5
-rw-r--r--drivers/usb/core/config.c3
-rw-r--r--drivers/usb/core/hub.c44
-rw-r--r--drivers/usb/core/port.c4
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/dwc2/platform.c81
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/gadget/function/f_midi.c3
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c3
-rw-r--r--drivers/usb/host/ohci-at91.c11
-rw-r--r--drivers/usb/host/whci/qset.c4
-rw-r--r--drivers/usb/host/xhci-hub.c47
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c8
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/musb_core.c8
-rw-r--r--drivers/usb/phy/phy-msm-usb.c6
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c5
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c11
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ipaq.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c1
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/vfio/Kconfig15
-rw-r--r--drivers/vfio/pci/vfio_pci.c10
-rw-r--r--drivers/vfio/platform/vfio_platform.c1
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c5
-rw-r--r--drivers/vfio/vfio.c188
-rw-r--r--drivers/vhost/vhost.c8
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c13
-rw-r--r--drivers/video/fbdev/omap2/dss/venc.c12
-rw-r--r--drivers/virtio/virtio.c1
-rw-r--r--drivers/virtio/virtio_ring.c48
-rw-r--r--drivers/xen/events/events_fifo.c23
-rw-r--r--drivers/xen/xen-pciback/pciback.h1
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c75
-rw-r--r--drivers/xen/xen-pciback/xenbus.c4
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--fs/9p/vfs_inode.c4
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/btrfs/extent-tree.c10
-rw-r--r--fs/btrfs/file.c18
-rw-r--r--fs/btrfs/free-space-cache.c10
-rw-r--r--fs/btrfs/transaction.c1
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/volumes.c3
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/direct-io.c1
-rw-r--r--fs/exofs/inode.c5
-rw-r--r--fs/ext4/crypto.c2
-rw-r--r--fs/ext4/ext4.h51
-rw-r--r--fs/ext4/symlink.c2
-rw-r--r--fs/ext4/sysfs.c2
-rw-r--r--fs/fuse/cuse.c2
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/jbd2/transaction.c12
-rw-r--r--fs/nfs/callback_xdr.c7
-rw-r--r--fs/nfs/inode.c6
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/objlayout/objio_osd.c5
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/pnfs.c4
-rw-r--r--fs/nfsd/nfs4layouts.c2
-rw-r--r--fs/ocfs2/namei.c4
-rw-r--r--fs/proc/base.c1
-rw-r--r--include/asm-generic/tlb.h2
-rw-r--r--include/drm/drmP.h10
-rw-r--r--include/drm/drm_atomic.h4
-rw-r--r--include/drm/drm_atomic_helper.h2
-rw-r--r--include/drm/drm_crtc.h1230
-rw-r--r--include/drm/drm_crtc_helper.h165
-rw-r--r--include/drm/drm_dp_mst_helper.h4
-rw-r--r--include/drm/drm_encoder_slave.h2
-rw-r--r--include/drm/drm_fb_helper.h101
-rw-r--r--include/drm/drm_mipi_dsi.h27
-rw-r--r--include/drm/drm_modes.h345
-rw-r--r--include/drm/drm_modeset_helper_vtables.h928
-rw-r--r--include/drm/drm_plane_helper.h38
-rw-r--r--include/drm/ttm/ttm_bo_api.h10
-rw-r--r--include/drm/ttm/ttm_bo_driver.h4
-rw-r--r--include/linux/bitops.h2
-rw-r--r--include/linux/cgroup-defs.h13
-rw-r--r--include/linux/cgroup.h47
-rw-r--r--include/linux/enclosure.h4
-rw-r--r--include/linux/irqchip/arm-gic-v3.h1
-rw-r--r--include/linux/jump_label.h2
-rw-r--r--include/linux/kmemleak.h2
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/lightnvm.h21
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/mlx4/device.h11
-rw-r--r--include/linux/mmdebug.h1
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/of_irq.h19
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/platform_data/edma.h2
-rw-r--r--include/linux/proportions.h2
-rw-r--r--include/linux/qed/common_hsi.h2
-rw-r--r--include/linux/qed/qed_chain.h3
-rw-r--r--include/linux/rhashtable.h18
-rw-r--r--include/linux/stop_machine.h6
-rw-r--r--include/linux/uprobes.h2
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/vfio.h3
-rw-r--r--include/linux/wait.h10
-rw-r--r--include/net/dst.h33
-rw-r--r--include/net/inet_sock.h27
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/net/sock.h7
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/xfrm.h25
-rw-r--r--include/rdma/ib_mad.h2
-rw-r--r--include/rdma/ib_verbs.h1
-rw-r--r--include/sound/hda_register.h3
-rw-r--r--include/uapi/drm/Kbuild2
-rw-r--r--include/uapi/drm/amdgpu_drm.h290
-rw-r--r--include/uapi/drm/armada_drm.h2
-rw-r--r--include/uapi/drm/drm.h9
-rw-r--r--include/uapi/drm/drm_fourcc.h2
-rw-r--r--include/uapi/drm/drm_mode.h18
-rw-r--r--include/uapi/drm/drm_sarea.h2
-rw-r--r--include/uapi/drm/etnaviv_drm.h222
-rw-r--r--include/uapi/drm/exynos_drm.h8
-rw-r--r--include/uapi/drm/i810_drm.h2
-rw-r--r--include/uapi/drm/i915_drm.h2
-rw-r--r--include/uapi/drm/mga_drm.h2
-rw-r--r--include/uapi/drm/msm_drm.h5
-rw-r--r--include/uapi/drm/nouveau_drm.h86
-rw-r--r--include/uapi/drm/omap_drm.h8
-rw-r--r--include/uapi/drm/qxl_drm.h77
-rw-r--r--include/uapi/drm/r128_drm.h2
-rw-r--r--include/uapi/drm/radeon_drm.h128
-rw-r--r--include/uapi/drm/savage_drm.h2
-rw-r--r--include/uapi/drm/tegra_drm.h2
-rw-r--r--include/uapi/drm/vc4_drm.h279
-rw-r--r--include/uapi/drm/via_drm.h5
-rw-r--r--include/uapi/drm/virtgpu_drm.h101
-rw-r--r--include/uapi/drm/vmwgfx_drm.h268
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/agpgart.h1
-rw-r--r--include/uapi/linux/openvswitch.h2
-rw-r--r--include/uapi/linux/vfio.h7
-rw-r--r--include/uapi/linux/virtio_gpu.h2
-rw-r--r--include/xen/interface/io/ring.h14
-rw-r--r--init/Kconfig7
-rw-r--r--kernel/cgroup.c99
-rw-r--r--kernel/cgroup_freezer.c23
-rw-r--r--kernel/cgroup_pids.c77
-rw-r--r--kernel/cpuset.c33
-rw-r--r--kernel/events/callchain.c2
-rw-r--r--kernel/events/core.c90
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/fork.c9
-rw-r--r--kernel/irq_work.c2
-rw-r--r--kernel/jump_label.c2
-rw-r--r--kernel/locking/lockdep.c2
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/osq_lock.c8
-rw-r--r--kernel/sched/clock.c2
-rw-r--r--kernel/sched/core.c12
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/sched/wait.c20
-rw-r--r--kernel/stop_machine.c4
-rw-r--r--kernel/trace/trace_event_perf.c2
-rw-r--r--lib/btree.c2
-rw-r--r--lib/dma-debug.c4
-rw-r--r--lib/proportions.c2
-rw-r--r--lib/rhashtable.c67
-rw-r--r--mm/backing-dev.c19
-rw-r--r--mm/hugetlb.c27
-rw-r--r--mm/memcontrol.c49
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/shmem.c34
-rw-r--r--mm/vmstat.c8
-rw-r--r--mm/zswap.c6
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/batman-adv/distributed-arp-table.c5
-rw-r--r--net/batman-adv/routing.c19
-rw-r--r--net/batman-adv/translation-table.c16
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/core/netclassid_cgroup.c6
-rw-r--r--net/core/netprio_cgroup.c9
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/core/sock.c7
-rw-r--r--net/decnet/af_decnet.c3
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/fib_frontend.c9
-rw-r--r--net/ipv4/fou.c3
-rw-r--r--net/ipv4/netfilter/Kconfig1
-rw-r--r--net/ipv4/tcp_ipv4.c5
-rw-r--r--net/ipv4/tcp_output.c23
-rw-r--r--net/ipv6/addrconf.c8
-rw-r--r--net/ipv6/af_inet6.c3
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/netfilter/Kconfig1
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/irda/af_irda.c3
-rw-r--r--net/mac80211/cfg.c3
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/mlme.c17
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/util.c113
-rw-r--r--net/mac80211/vht.c10
-rw-r--r--net/mpls/af_mpls.c43
-rw-r--r--net/mpls/mpls_iptunnel.c4
-rw-r--r--net/netfilter/nf_tables_api.c99
-rw-r--r--net/netfilter/nfnetlink.c4
-rw-r--r--net/netfilter/nfnetlink_queue.c9
-rw-r--r--net/openvswitch/conntrack.c16
-rw-r--r--net/rfkill/core.c6
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sctp/ipv6.c11
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/sm_make_chunk.c4
-rw-r--r--net/sctp/sm_statefuns.c3
-rw-r--r--net/sctp/socket.c12
-rw-r--r--net/socket.c1
-rw-r--r--net/sunrpc/backchannel_rqst.c8
-rw-r--r--net/sunrpc/sched.c6
-rw-r--r--net/sunrpc/svc.c12
-rw-r--r--net/unix/af_unix.c13
-rw-r--r--net/wireless/nl80211.c5
-rw-r--r--net/wireless/reg.c5
-rw-r--r--net/xfrm/xfrm_policy.c50
-rwxr-xr-xscripts/link-vmlinux.sh2
-rw-r--r--sound/pci/hda/hda_intel.c23
-rw-r--r--sound/pci/hda/patch_ca0132.c3
-rw-r--r--sound/pci/hda/patch_realtek.c62
-rw-r--r--sound/pci/rme96.c41
-rw-r--r--sound/usb/mixer.c2
-rw-r--r--sound/usb/mixer_maps.c12
-rw-r--r--sound/usb/mixer_quirks.c37
-rw-r--r--sound/usb/mixer_quirks.h4
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/virtio/linux/kernel.h6
-rw-r--r--tools/virtio/linux/virtio.h6
-rw-r--r--tools/virtio/linux/virtio_config.h20
-rw-r--r--virt/kvm/arm/vgic.c2
1183 files changed, 85580 insertions, 28178 deletions
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 0061f22d126d..351e801cead9 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -124,6 +124,43 @@
124 <para> 124 <para>
125 [Insert diagram of typical DRM stack here] 125 [Insert diagram of typical DRM stack here]
126 </para> 126 </para>
127 <sect1>
128 <title>Style Guidelines</title>
129 <para>
130 For consistency this documentation uses American English. Abbreviations
131 are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
132 on. To aid in reading, documentations make full use of the markup
133 characters kerneldoc provides: @parameter for function parameters, @member
134 for structure members, &amp;structure to reference structures and
135 function() for functions. These all get automatically hyperlinked if
136 kerneldoc for the referenced objects exists. When referencing entries in
137 function vtables please use -&gt;vfunc(). Note that kerneldoc does
138 not support referencing struct members directly, so please add a reference
139 to the vtable struct somewhere in the same paragraph or at least section.
140 </para>
141 <para>
142 Except in special situations (to separate locked from unlocked variants)
143 locking requirements for functions aren't documented in the kerneldoc.
144 Instead locking should be check at runtime using e.g.
145 <code>WARN_ON(!mutex_is_locked(...));</code>. Since it's much easier to
146 ignore documentation than runtime noise this provides more value. And on
147 top of that runtime checks do need to be updated when the locking rules
148 change, increasing the chances that they're correct. Within the
149 documentation the locking rules should be explained in the relevant
150 structures: Either in the comment for the lock explaining what it
151 protects, or data fields need a note about which lock protects them, or
152 both.
153 </para>
154 <para>
155 Functions which have a non-<code>void</code> return value should have a
156 section called "Returns" explaining the expected return values in
157 different cases and their meanings. Currently there's no consensus whether
158 that section name should be all upper-case or not, and whether it should
159 end in a colon or not. Go with the file-local style. Other common section
160 names are "Notes" with information for dangerous or tricky corner cases,
161 and "FIXME" where the interface could be cleaned up.
162 </para>
163 </sect1>
127 </chapter> 164 </chapter>
128 165
129 <!-- Internals --> 166 <!-- Internals -->
@@ -946,12 +983,10 @@ int max_width, max_height;</synopsis>
946 <sect2> 983 <sect2>
947 <title>Atomic Mode Setting Function Reference</title> 984 <title>Atomic Mode Setting Function Reference</title>
948!Edrivers/gpu/drm/drm_atomic.c 985!Edrivers/gpu/drm/drm_atomic.c
986!Idrivers/gpu/drm/drm_atomic.c
949 </sect2> 987 </sect2>
950 <sect2> 988 <sect2>
951 <title>Frame Buffer Creation</title> 989 <title>Frame Buffer Abstraction</title>
952 <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
953 struct drm_file *file_priv,
954 struct drm_mode_fb_cmd2 *mode_cmd);</synopsis>
955 <para> 990 <para>
956 Frame buffers are abstract memory objects that provide a source of 991 Frame buffers are abstract memory objects that provide a source of
957 pixels to scanout to a CRTC. Applications explicitly request the 992 pixels to scanout to a CRTC. Applications explicitly request the
@@ -970,73 +1005,6 @@ int max_width, max_height;</synopsis>
970 and so expects TTM handles in the create ioctl and not GEM handles. 1005 and so expects TTM handles in the create ioctl and not GEM handles.
971 </para> 1006 </para>
972 <para> 1007 <para>
973 Drivers must first validate the requested frame buffer parameters passed
974 through the mode_cmd argument. In particular this is where invalid
975 sizes, pixel formats or pitches can be caught.
976 </para>
977 <para>
978 If the parameters are deemed valid, drivers then create, initialize and
979 return an instance of struct <structname>drm_framebuffer</structname>.
980 If desired the instance can be embedded in a larger driver-specific
981 structure. Drivers must fill its <structfield>width</structfield>,
982 <structfield>height</structfield>, <structfield>pitches</structfield>,
983 <structfield>offsets</structfield>, <structfield>depth</structfield>,
984 <structfield>bits_per_pixel</structfield> and
985 <structfield>pixel_format</structfield> fields from the values passed
986 through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
987 should call the <function>drm_helper_mode_fill_fb_struct</function>
988 helper function to do so.
989 </para>
990
991 <para>
992 The initialization of the new framebuffer instance is finalized with a
993 call to <function>drm_framebuffer_init</function> which takes a pointer
994 to DRM frame buffer operations (struct
995 <structname>drm_framebuffer_funcs</structname>). Note that this function
996 publishes the framebuffer and so from this point on it can be accessed
997 concurrently from other threads. Hence it must be the last step in the
998 driver's framebuffer initialization sequence. Frame buffer operations
999 are
1000 <itemizedlist>
1001 <listitem>
1002 <synopsis>int (*create_handle)(struct drm_framebuffer *fb,
1003 struct drm_file *file_priv, unsigned int *handle);</synopsis>
1004 <para>
1005 Create a handle to the frame buffer underlying memory object. If
1006 the frame buffer uses a multi-plane format, the handle will
1007 reference the memory object associated with the first plane.
1008 </para>
1009 <para>
1010 Drivers call <function>drm_gem_handle_create</function> to create
1011 the handle.
1012 </para>
1013 </listitem>
1014 <listitem>
1015 <synopsis>void (*destroy)(struct drm_framebuffer *framebuffer);</synopsis>
1016 <para>
1017 Destroy the frame buffer object and frees all associated
1018 resources. Drivers must call
1019 <function>drm_framebuffer_cleanup</function> to free resources
1020 allocated by the DRM core for the frame buffer object, and must
1021 make sure to unreference all memory objects associated with the
1022 frame buffer. Handles created by the
1023 <methodname>create_handle</methodname> operation are released by
1024 the DRM core.
1025 </para>
1026 </listitem>
1027 <listitem>
1028 <synopsis>int (*dirty)(struct drm_framebuffer *framebuffer,
1029 struct drm_file *file_priv, unsigned flags, unsigned color,
1030 struct drm_clip_rect *clips, unsigned num_clips);</synopsis>
1031 <para>
1032 This optional operation notifies the driver that a region of the
1033 frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB
1034 ioctl call.
1035 </para>
1036 </listitem>
1037 </itemizedlist>
1038 </para>
1039 <para>
1040 The lifetime of a drm framebuffer is controlled with a reference count, 1008 The lifetime of a drm framebuffer is controlled with a reference count,
1041 drivers can grab additional references with 1009 drivers can grab additional references with
1042 <function>drm_framebuffer_reference</function>and drop them 1010 <function>drm_framebuffer_reference</function>and drop them
@@ -1173,137 +1141,6 @@ int max_width, max_height;</synopsis>
1173 pointer to CRTC functions. 1141 pointer to CRTC functions.
1174 </para> 1142 </para>
1175 </sect3> 1143 </sect3>
1176 <sect3 id="drm-kms-crtcops">
1177 <title>CRTC Operations</title>
1178 <sect4>
1179 <title>Set Configuration</title>
1180 <synopsis>int (*set_config)(struct drm_mode_set *set);</synopsis>
1181 <para>
1182 Apply a new CRTC configuration to the device. The configuration
1183 specifies a CRTC, a frame buffer to scan out from, a (x,y) position in
1184 the frame buffer, a display mode and an array of connectors to drive
1185 with the CRTC if possible.
1186 </para>
1187 <para>
1188 If the frame buffer specified in the configuration is NULL, the driver
1189 must detach all encoders connected to the CRTC and all connectors
1190 attached to those encoders and disable them.
1191 </para>
1192 <para>
1193 This operation is called with the mode config lock held.
1194 </para>
1195 <note><para>
1196 Note that the drm core has no notion of restoring the mode setting
1197 state after resume, since all resume handling is in the full
1198 responsibility of the driver. The common mode setting helper library
1199 though provides a helper which can be used for this:
1200 <function>drm_helper_resume_force_mode</function>.
1201 </para></note>
1202 </sect4>
1203 <sect4>
1204 <title>Page Flipping</title>
1205 <synopsis>int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1206 struct drm_pending_vblank_event *event);</synopsis>
1207 <para>
1208 Schedule a page flip to the given frame buffer for the CRTC. This
1209 operation is called with the mode config mutex held.
1210 </para>
1211 <para>
1212 Page flipping is a synchronization mechanism that replaces the frame
1213 buffer being scanned out by the CRTC with a new frame buffer during
1214 vertical blanking, avoiding tearing. When an application requests a page
1215 flip the DRM core verifies that the new frame buffer is large enough to
1216 be scanned out by the CRTC in the currently configured mode and then
1217 calls the CRTC <methodname>page_flip</methodname> operation with a
1218 pointer to the new frame buffer.
1219 </para>
1220 <para>
1221 The <methodname>page_flip</methodname> operation schedules a page flip.
1222 Once any pending rendering targeting the new frame buffer has
1223 completed, the CRTC will be reprogrammed to display that frame buffer
1224 after the next vertical refresh. The operation must return immediately
1225 without waiting for rendering or page flip to complete and must block
1226 any new rendering to the frame buffer until the page flip completes.
1227 </para>
1228 <para>
1229 If a page flip can be successfully scheduled the driver must set the
1230 <code>drm_crtc-&gt;fb</code> field to the new framebuffer pointed to
1231 by <code>fb</code>. This is important so that the reference counting
1232 on framebuffers stays balanced.
1233 </para>
1234 <para>
1235 If a page flip is already pending, the
1236 <methodname>page_flip</methodname> operation must return
1237 -<errorname>EBUSY</errorname>.
1238 </para>
1239 <para>
1240 To synchronize page flip to vertical blanking the driver will likely
1241 need to enable vertical blanking interrupts. It should call
1242 <function>drm_vblank_get</function> for that purpose, and call
1243 <function>drm_vblank_put</function> after the page flip completes.
1244 </para>
1245 <para>
1246 If the application has requested to be notified when page flip completes
1247 the <methodname>page_flip</methodname> operation will be called with a
1248 non-NULL <parameter>event</parameter> argument pointing to a
1249 <structname>drm_pending_vblank_event</structname> instance. Upon page
1250 flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
 1251 to fill in the event and send it to wake up any waiting processes.
1252 This can be performed with
1253 <programlisting><![CDATA[
1254 spin_lock_irqsave(&dev->event_lock, flags);
1255 ...
1256 drm_send_vblank_event(dev, pipe, event);
1257 spin_unlock_irqrestore(&dev->event_lock, flags);
1258 ]]></programlisting>
1259 </para>
1260 <note><para>
1261 FIXME: Could drivers that don't need to wait for rendering to complete
1262 just add the event to <literal>dev-&gt;vblank_event_list</literal> and
1263 let the DRM core handle everything, as for "normal" vertical blanking
1264 events?
1265 </para></note>
1266 <para>
1267 While waiting for the page flip to complete, the
1268 <literal>event-&gt;base.link</literal> list head can be used freely by
1269 the driver to store the pending event in a driver-specific list.
1270 </para>
1271 <para>
1272 If the file handle is closed before the event is signaled, drivers must
1273 take care to destroy the event in their
1274 <methodname>preclose</methodname> operation (and, if needed, call
1275 <function>drm_vblank_put</function>).
1276 </para>
1277 </sect4>
1278 <sect4>
1279 <title>Miscellaneous</title>
1280 <itemizedlist>
1281 <listitem>
1282 <synopsis>void (*set_property)(struct drm_crtc *crtc,
1283 struct drm_property *property, uint64_t value);</synopsis>
1284 <para>
1285 Set the value of the given CRTC property to
1286 <parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
1287 for more information about properties.
1288 </para>
1289 </listitem>
1290 <listitem>
1291 <synopsis>void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1292 uint32_t start, uint32_t size);</synopsis>
1293 <para>
1294 Apply a gamma table to the device. The operation is optional.
1295 </para>
1296 </listitem>
1297 <listitem>
1298 <synopsis>void (*destroy)(struct drm_crtc *crtc);</synopsis>
1299 <para>
1300 Destroy the CRTC when not needed anymore. See
1301 <xref linkend="drm-kms-init"/>.
1302 </para>
1303 </listitem>
1304 </itemizedlist>
1305 </sect4>
1306 </sect3>
1307 </sect2> 1144 </sect2>
1308 <sect2> 1145 <sect2>
1309 <title>Planes (struct <structname>drm_plane</structname>)</title> 1146 <title>Planes (struct <structname>drm_plane</structname>)</title>
@@ -1320,7 +1157,7 @@ int max_width, max_height;</synopsis>
1320 <listitem> 1157 <listitem>
1321 DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary 1158 DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary
1322 planes are the planes operated upon by CRTC modesetting and flipping 1159 planes are the planes operated upon by CRTC modesetting and flipping
1323 operations described in <xref linkend="drm-kms-crtcops"/>. 1160 operations described in the page_flip hook in <structname>drm_crtc_funcs</structname>.
1324 </listitem> 1161 </listitem>
1325 <listitem> 1162 <listitem>
1326 DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC. Cursor 1163 DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC. Cursor
@@ -1357,52 +1194,6 @@ int max_width, max_height;</synopsis>
1357 primary plane with standard capabilities. 1194 primary plane with standard capabilities.
1358 </para> 1195 </para>
1359 </sect3> 1196 </sect3>
1360 <sect3>
1361 <title>Plane Operations</title>
1362 <itemizedlist>
1363 <listitem>
1364 <synopsis>int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc,
1365 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
1366 unsigned int crtc_w, unsigned int crtc_h,
1367 uint32_t src_x, uint32_t src_y,
1368 uint32_t src_w, uint32_t src_h);</synopsis>
1369 <para>
1370 Enable and configure the plane to use the given CRTC and frame buffer.
1371 </para>
1372 <para>
1373 The source rectangle in frame buffer memory coordinates is given by
1374 the <parameter>src_x</parameter>, <parameter>src_y</parameter>,
1375 <parameter>src_w</parameter> and <parameter>src_h</parameter>
1376 parameters (as 16.16 fixed point values). Devices that don't support
1377 subpixel plane coordinates can ignore the fractional part.
1378 </para>
1379 <para>
1380 The destination rectangle in CRTC coordinates is given by the
1381 <parameter>crtc_x</parameter>, <parameter>crtc_y</parameter>,
1382 <parameter>crtc_w</parameter> and <parameter>crtc_h</parameter>
1383 parameters (as integer values). Devices scale the source rectangle to
1384 the destination rectangle. If scaling is not supported, and the source
1385 rectangle size doesn't match the destination rectangle size, the
1386 driver must return a -<errorname>EINVAL</errorname> error.
1387 </para>
1388 </listitem>
1389 <listitem>
1390 <synopsis>int (*disable_plane)(struct drm_plane *plane);</synopsis>
1391 <para>
1392 Disable the plane. The DRM core calls this method in response to a
1393 DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0.
1394 Disabled planes must not be processed by the CRTC.
1395 </para>
1396 </listitem>
1397 <listitem>
1398 <synopsis>void (*destroy)(struct drm_plane *plane);</synopsis>
1399 <para>
1400 Destroy the plane when not needed anymore. See
1401 <xref linkend="drm-kms-init"/>.
1402 </para>
1403 </listitem>
1404 </itemizedlist>
1405 </sect3>
1406 </sect2> 1197 </sect2>
1407 <sect2> 1198 <sect2>
1408 <title>Encoders (struct <structname>drm_encoder</structname>)</title> 1199 <title>Encoders (struct <structname>drm_encoder</structname>)</title>
@@ -1459,27 +1250,6 @@ int max_width, max_height;</synopsis>
1459 encoders they want to use to a CRTC. 1250 encoders they want to use to a CRTC.
1460 </para> 1251 </para>
1461 </sect3> 1252 </sect3>
1462 <sect3>
1463 <title>Encoder Operations</title>
1464 <itemizedlist>
1465 <listitem>
1466 <synopsis>void (*destroy)(struct drm_encoder *encoder);</synopsis>
1467 <para>
1468 Called to destroy the encoder when not needed anymore. See
1469 <xref linkend="drm-kms-init"/>.
1470 </para>
1471 </listitem>
1472 <listitem>
 1473 <synopsis>void (*set_property)(struct drm_encoder *encoder,
 1474 struct drm_property *property, uint64_t value);</synopsis>
 1475 <para>
 1476 Set the value of the given encoder property to
1477 <parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
1478 for more information about properties.
1479 </para>
1480 </listitem>
1481 </itemizedlist>
1482 </sect3>
1483 </sect2> 1253 </sect2>
1484 <sect2> 1254 <sect2>
1485 <title>Connectors (struct <structname>drm_connector</structname>)</title> 1255 <title>Connectors (struct <structname>drm_connector</structname>)</title>
@@ -1683,27 +1453,6 @@ int max_width, max_height;</synopsis>
1683 connector_status_unknown. 1453 connector_status_unknown.
1684 </para> 1454 </para>
1685 </sect4> 1455 </sect4>
1686 <sect4>
1687 <title>Miscellaneous</title>
1688 <itemizedlist>
1689 <listitem>
1690 <synopsis>void (*set_property)(struct drm_connector *connector,
1691 struct drm_property *property, uint64_t value);</synopsis>
1692 <para>
1693 Set the value of the given connector property to
1694 <parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
1695 for more information about properties.
1696 </para>
1697 </listitem>
1698 <listitem>
1699 <synopsis>void (*destroy)(struct drm_connector *connector);</synopsis>
1700 <para>
1701 Destroy the connector when not needed anymore. See
1702 <xref linkend="drm-kms-init"/>.
1703 </para>
1704 </listitem>
1705 </itemizedlist>
1706 </sect4>
1707 </sect3> 1456 </sect3>
1708 </sect2> 1457 </sect2>
1709 <sect2> 1458 <sect2>
@@ -1830,462 +1579,6 @@ void intel_crt_init(struct drm_device *dev)
1830 entities. 1579 entities.
1831 </para> 1580 </para>
1832 <sect2> 1581 <sect2>
1833 <title>Helper Functions</title>
1834 <itemizedlist>
1835 <listitem>
1836 <synopsis>int drm_crtc_helper_set_config(struct drm_mode_set *set);</synopsis>
1837 <para>
1838 The <function>drm_crtc_helper_set_config</function> helper function
1839 is a CRTC <methodname>set_config</methodname> implementation. It
1840 first tries to locate the best encoder for each connector by calling
1841 the connector <methodname>best_encoder</methodname> helper
1842 operation.
1843 </para>
1844 <para>
1845 After locating the appropriate encoders, the helper function will
1846 call the <methodname>mode_fixup</methodname> encoder and CRTC helper
1847 operations to adjust the requested mode, or reject it completely in
1848 which case an error will be returned to the application. If the new
1849 configuration after mode adjustment is identical to the current
1850 configuration the helper function will return without performing any
1851 other operation.
1852 </para>
1853 <para>
1854 If the adjusted mode is identical to the current mode but changes to
1855 the frame buffer need to be applied, the
1856 <function>drm_crtc_helper_set_config</function> function will call
1857 the CRTC <methodname>mode_set_base</methodname> helper operation. If
1858 the adjusted mode differs from the current mode, or if the
1859 <methodname>mode_set_base</methodname> helper operation is not
1860 provided, the helper function performs a full mode set sequence by
1861 calling the <methodname>prepare</methodname>,
1862 <methodname>mode_set</methodname> and
1863 <methodname>commit</methodname> CRTC and encoder helper operations,
1864 in that order.
1865 </para>
1866 </listitem>
1867 <listitem>
1868 <synopsis>void drm_helper_connector_dpms(struct drm_connector *connector, int mode);</synopsis>
1869 <para>
1870 The <function>drm_helper_connector_dpms</function> helper function
1871 is a connector <methodname>dpms</methodname> implementation that
1872 tracks power state of connectors. To use the function, drivers must
1873 provide <methodname>dpms</methodname> helper operations for CRTCs
1874 and encoders to apply the DPMS state to the device.
1875 </para>
1876 <para>
1877 The mid-layer doesn't track the power state of CRTCs and encoders.
1878 The <methodname>dpms</methodname> helper operations can thus be
1879 called with a mode identical to the currently active mode.
1880 </para>
1881 </listitem>
1882 <listitem>
1883 <synopsis>int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
1884 uint32_t maxX, uint32_t maxY);</synopsis>
1885 <para>
1886 The <function>drm_helper_probe_single_connector_modes</function> helper
1887 function is a connector <methodname>fill_modes</methodname>
1888 implementation that updates the connection status for the connector
1889 and then retrieves a list of modes by calling the connector
1890 <methodname>get_modes</methodname> helper operation.
1891 </para>
1892 <para>
1893 If the helper operation returns no mode, and if the connector status
1894 is connector_status_connected, standard VESA DMT modes up to
1895 1024x768 are automatically added to the modes list by a call to
1896 <function>drm_add_modes_noedid</function>.
1897 </para>
1898 <para>
1899 The function then filters out modes larger than
1900 <parameter>max_width</parameter> and <parameter>max_height</parameter>
1901 if specified. It finally calls the optional connector
1902 <methodname>mode_valid</methodname> helper operation for each mode in
1903 the probed list to check whether the mode is valid for the connector.
1904 </para>
1905 </listitem>
1906 </itemizedlist>
1907 </sect2>
1908 <sect2>
1909 <title>CRTC Helper Operations</title>
1910 <itemizedlist>
1911 <listitem id="drm-helper-crtc-mode-fixup">
1912 <synopsis>bool (*mode_fixup)(struct drm_crtc *crtc,
1913 const struct drm_display_mode *mode,
1914 struct drm_display_mode *adjusted_mode);</synopsis>
1915 <para>
1916 Let CRTCs adjust the requested mode or reject it completely. This
1917 operation returns true if the mode is accepted (possibly after being
1918 adjusted) or false if it is rejected.
1919 </para>
1920 <para>
1921 The <methodname>mode_fixup</methodname> operation should reject the
1922 mode if it can't reasonably use it. The definition of "reasonable"
1923 is currently fuzzy in this context. One possible behaviour would be
1924 to set the adjusted mode to the panel timings when a fixed-mode
1925 panel is used with hardware capable of scaling. Another behaviour
1926 would be to accept any input mode and adjust it to the closest mode
1927 supported by the hardware (FIXME: This needs to be clarified).
1928 </para>
1929 </listitem>
1930 <listitem>
1931 <synopsis>int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
1932 struct drm_framebuffer *old_fb)</synopsis>
1933 <para>
1934 Move the CRTC on the current frame buffer (stored in
1935 <literal>crtc-&gt;fb</literal>) to position (x,y). Any of the frame
1936 buffer, x position or y position may have been modified.
1937 </para>
1938 <para>
1939 This helper operation is optional. If not provided, the
1940 <function>drm_crtc_helper_set_config</function> function will fall
1941 back to the <methodname>mode_set</methodname> helper operation.
1942 </para>
1943 <note><para>
1944 FIXME: Why are x and y passed as arguments, as they can be accessed
1945 through <literal>crtc-&gt;x</literal> and
1946 <literal>crtc-&gt;y</literal>?
1947 </para></note>
1948 </listitem>
1949 <listitem>
1950 <synopsis>void (*prepare)(struct drm_crtc *crtc);</synopsis>
1951 <para>
1952 Prepare the CRTC for mode setting. This operation is called after
1953 validating the requested mode. Drivers use it to perform
1954 device-specific operations required before setting the new mode.
1955 </para>
1956 </listitem>
1957 <listitem>
1958 <synopsis>int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
1959 struct drm_display_mode *adjusted_mode, int x, int y,
1960 struct drm_framebuffer *old_fb);</synopsis>
1961 <para>
1962 Set a new mode, position and frame buffer. Depending on the device
1963 requirements, the mode can be stored internally by the driver and
1964 applied in the <methodname>commit</methodname> operation, or
1965 programmed to the hardware immediately.
1966 </para>
1967 <para>
1968 The <methodname>mode_set</methodname> operation returns 0 on success
1969 or a negative error code if an error occurs.
1970 </para>
1971 </listitem>
1972 <listitem>
1973 <synopsis>void (*commit)(struct drm_crtc *crtc);</synopsis>
1974 <para>
1975 Commit a mode. This operation is called after setting the new mode.
1976 Upon return the device must use the new mode and be fully
1977 operational.
1978 </para>
1979 </listitem>
1980 </itemizedlist>
1981 </sect2>
1982 <sect2>
1983 <title>Encoder Helper Operations</title>
1984 <itemizedlist>
1985 <listitem>
1986 <synopsis>bool (*mode_fixup)(struct drm_encoder *encoder,
1987 const struct drm_display_mode *mode,
1988 struct drm_display_mode *adjusted_mode);</synopsis>
1989 <para>
1990 Let encoders adjust the requested mode or reject it completely. This
1991 operation returns true if the mode is accepted (possibly after being
1992 adjusted) or false if it is rejected. See the
1993 <link linkend="drm-helper-crtc-mode-fixup">mode_fixup CRTC helper
1994 operation</link> for an explanation of the allowed adjustments.
1995 </para>
1996 </listitem>
1997 <listitem>
1998 <synopsis>void (*prepare)(struct drm_encoder *encoder);</synopsis>
1999 <para>
2000 Prepare the encoder for mode setting. This operation is called after
2001 validating the requested mode. Drivers use it to perform
2002 device-specific operations required before setting the new mode.
2003 </para>
2004 </listitem>
2005 <listitem>
2006 <synopsis>void (*mode_set)(struct drm_encoder *encoder,
2007 struct drm_display_mode *mode,
2008 struct drm_display_mode *adjusted_mode);</synopsis>
2009 <para>
2010 Set a new mode. Depending on the device requirements, the mode can
2011 be stored internally by the driver and applied in the
2012 <methodname>commit</methodname> operation, or programmed to the
2013 hardware immediately.
2014 </para>
2015 </listitem>
2016 <listitem>
2017 <synopsis>void (*commit)(struct drm_encoder *encoder);</synopsis>
2018 <para>
2019 Commit a mode. This operation is called after setting the new mode.
2020 Upon return the device must use the new mode and be fully
2021 operational.
2022 </para>
2023 </listitem>
2024 </itemizedlist>
2025 </sect2>
2026 <sect2>
2027 <title>Connector Helper Operations</title>
2028 <itemizedlist>
2029 <listitem>
2030 <synopsis>struct drm_encoder *(*best_encoder)(struct drm_connector *connector);</synopsis>
2031 <para>
 2032 Return a pointer to the best encoder for the connector. Devices that
2033 map connectors to encoders 1:1 simply return the pointer to the
2034 associated encoder. This operation is mandatory.
2035 </para>
2036 </listitem>
2037 <listitem>
2038 <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
2039 <para>
2040 Fill the connector's <structfield>probed_modes</structfield> list
2041 by parsing EDID data with <function>drm_add_edid_modes</function>,
2042 adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
2043 or calling <function>drm_mode_probed_add</function> directly for every
2044 supported mode and return the number of modes it has detected. This
2045 operation is mandatory.
2046 </para>
2047 <para>
2048 Note that the caller function will automatically add standard VESA
2049 DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
2050 helper operation returns no mode and if the connector status is
2051 connector_status_connected. There is no need to call
2052 <function>drm_add_edid_modes</function> manually in that case.
2053 </para>
2054 <para>
2055 When adding modes manually the driver creates each mode with a call to
2056 <function>drm_mode_create</function> and must fill the following fields.
2057 <itemizedlist>
2058 <listitem>
2059 <synopsis>__u32 type;</synopsis>
2060 <para>
2061 Mode type bitmask, a combination of
2062 <variablelist>
2063 <varlistentry>
2064 <term>DRM_MODE_TYPE_BUILTIN</term>
2065 <listitem><para>not used?</para></listitem>
2066 </varlistentry>
2067 <varlistentry>
2068 <term>DRM_MODE_TYPE_CLOCK_C</term>
2069 <listitem><para>not used?</para></listitem>
2070 </varlistentry>
2071 <varlistentry>
2072 <term>DRM_MODE_TYPE_CRTC_C</term>
2073 <listitem><para>not used?</para></listitem>
2074 </varlistentry>
2075 <varlistentry>
2076 <term>
2077 DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector
2078 </term>
2079 <listitem>
2080 <para>not used?</para>
2081 </listitem>
2082 </varlistentry>
2083 <varlistentry>
2084 <term>DRM_MODE_TYPE_DEFAULT</term>
2085 <listitem><para>not used?</para></listitem>
2086 </varlistentry>
2087 <varlistentry>
2088 <term>DRM_MODE_TYPE_USERDEF</term>
2089 <listitem><para>not used?</para></listitem>
2090 </varlistentry>
2091 <varlistentry>
2092 <term>DRM_MODE_TYPE_DRIVER</term>
2093 <listitem>
2094 <para>
2095 The mode has been created by the driver (as opposed to
 2096 user-created modes).
2097 </para>
2098 </listitem>
2099 </varlistentry>
2100 </variablelist>
2101 Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they
2102 create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred
2103 mode.
2104 </para>
2105 </listitem>
2106 <listitem>
2107 <synopsis>__u32 clock;</synopsis>
2108 <para>Pixel clock frequency in kHz unit</para>
2109 </listitem>
2110 <listitem>
2111 <synopsis>__u16 hdisplay, hsync_start, hsync_end, htotal;
2112 __u16 vdisplay, vsync_start, vsync_end, vtotal;</synopsis>
2113 <para>Horizontal and vertical timing information</para>
2114 <screen><![CDATA[
2115 Active Front Sync Back
2116 Region Porch Porch
2117 <-----------------------><----------------><-------------><-------------->
2118
2119 //////////////////////|
2120 ////////////////////// |
2121 ////////////////////// |.................. ................
2122 _______________
2123
2124 <----- [hv]display ----->
2125 <------------- [hv]sync_start ------------>
2126 <--------------------- [hv]sync_end --------------------->
2127 <-------------------------------- [hv]total ----------------------------->
2128]]></screen>
2129 </listitem>
2130 <listitem>
2131 <synopsis>__u16 hskew;
2132 __u16 vscan;</synopsis>
2133 <para>Unknown</para>
2134 </listitem>
2135 <listitem>
2136 <synopsis>__u32 flags;</synopsis>
2137 <para>
2138 Mode flags, a combination of
2139 <variablelist>
2140 <varlistentry>
2141 <term>DRM_MODE_FLAG_PHSYNC</term>
2142 <listitem><para>
2143 Horizontal sync is active high
2144 </para></listitem>
2145 </varlistentry>
2146 <varlistentry>
2147 <term>DRM_MODE_FLAG_NHSYNC</term>
2148 <listitem><para>
2149 Horizontal sync is active low
2150 </para></listitem>
2151 </varlistentry>
2152 <varlistentry>
2153 <term>DRM_MODE_FLAG_PVSYNC</term>
2154 <listitem><para>
2155 Vertical sync is active high
2156 </para></listitem>
2157 </varlistentry>
2158 <varlistentry>
2159 <term>DRM_MODE_FLAG_NVSYNC</term>
2160 <listitem><para>
2161 Vertical sync is active low
2162 </para></listitem>
2163 </varlistentry>
2164 <varlistentry>
2165 <term>DRM_MODE_FLAG_INTERLACE</term>
2166 <listitem><para>
2167 Mode is interlaced
2168 </para></listitem>
2169 </varlistentry>
2170 <varlistentry>
2171 <term>DRM_MODE_FLAG_DBLSCAN</term>
2172 <listitem><para>
2173 Mode uses doublescan
2174 </para></listitem>
2175 </varlistentry>
2176 <varlistentry>
2177 <term>DRM_MODE_FLAG_CSYNC</term>
2178 <listitem><para>
2179 Mode uses composite sync
2180 </para></listitem>
2181 </varlistentry>
2182 <varlistentry>
2183 <term>DRM_MODE_FLAG_PCSYNC</term>
2184 <listitem><para>
2185 Composite sync is active high
2186 </para></listitem>
2187 </varlistentry>
2188 <varlistentry>
2189 <term>DRM_MODE_FLAG_NCSYNC</term>
2190 <listitem><para>
2191 Composite sync is active low
2192 </para></listitem>
2193 </varlistentry>
2194 <varlistentry>
2195 <term>DRM_MODE_FLAG_HSKEW</term>
2196 <listitem><para>
2197 hskew provided (not used?)
2198 </para></listitem>
2199 </varlistentry>
2200 <varlistentry>
2201 <term>DRM_MODE_FLAG_BCAST</term>
2202 <listitem><para>
2203 not used?
2204 </para></listitem>
2205 </varlistentry>
2206 <varlistentry>
2207 <term>DRM_MODE_FLAG_PIXMUX</term>
2208 <listitem><para>
2209 not used?
2210 </para></listitem>
2211 </varlistentry>
2212 <varlistentry>
2213 <term>DRM_MODE_FLAG_DBLCLK</term>
2214 <listitem><para>
2215 not used?
2216 </para></listitem>
2217 </varlistentry>
2218 <varlistentry>
2219 <term>DRM_MODE_FLAG_CLKDIV2</term>
2220 <listitem><para>
2221 ?
2222 </para></listitem>
2223 </varlistentry>
2224 </variablelist>
2225 </para>
2226 <para>
2227 Note that modes marked with the INTERLACE or DBLSCAN flags will be
2228 filtered out by
2229 <function>drm_helper_probe_single_connector_modes</function> if
2230 the connector's <structfield>interlace_allowed</structfield> or
2231 <structfield>doublescan_allowed</structfield> field is set to 0.
2232 </para>
2233 </listitem>
2234 <listitem>
2235 <synopsis>char name[DRM_DISPLAY_MODE_LEN];</synopsis>
2236 <para>
2237 Mode name. The driver must call
2238 <function>drm_mode_set_name</function> to fill the mode name from
2239 <structfield>hdisplay</structfield>,
2240 <structfield>vdisplay</structfield> and interlace flag after
2241 filling the corresponding fields.
2242 </para>
2243 </listitem>
2244 </itemizedlist>
2245 </para>
2246 <para>
2247 The <structfield>vrefresh</structfield> value is computed by
2248 <function>drm_helper_probe_single_connector_modes</function>.
2249 </para>
2250 <para>
2251 When parsing EDID data, <function>drm_add_edid_modes</function> fills the
2252 connector <structfield>display_info</structfield>
2253 <structfield>width_mm</structfield> and
2254 <structfield>height_mm</structfield> fields. When creating modes
2255 manually the <methodname>get_modes</methodname> helper operation must
2256 set the <structfield>display_info</structfield>
2257 <structfield>width_mm</structfield> and
2258 <structfield>height_mm</structfield> fields if they haven't been set
2259 already (for instance at initialization time when a fixed-size panel is
2260 attached to the connector). The mode <structfield>width_mm</structfield>
2261 and <structfield>height_mm</structfield> fields are only used internally
2262 during EDID parsing and should not be set when creating modes manually.
2263 </para>
2264 </listitem>
2265 <listitem>
2266 <synopsis>int (*mode_valid)(struct drm_connector *connector,
2267 struct drm_display_mode *mode);</synopsis>
2268 <para>
2269 Verify whether a mode is valid for the connector. Return MODE_OK for
2270 supported modes and one of the enum drm_mode_status values (MODE_*)
2271 for unsupported modes. This operation is optional.
2272 </para>
2273 <para>
2274 As the mode rejection reason is currently not used beside for
2275 immediately removing the unsupported mode, an implementation can
2276 return MODE_BAD regardless of the exact reason why the mode is not
2277 valid.
2278 </para>
2279 <note><para>
2280 Note that the <methodname>mode_valid</methodname> helper operation is
2281 only called for modes detected by the device, and
2282 <emphasis>not</emphasis> for modes set by the user through the CRTC
2283 <methodname>set_config</methodname> operation.
2284 </para></note>
2285 </listitem>
2286 </itemizedlist>
2287 </sect2>
2288 <sect2>
2289 <title>Atomic Modeset Helper Functions Reference</title> 1582 <title>Atomic Modeset Helper Functions Reference</title>
2290 <sect3> 1583 <sect3>
2291 <title>Overview</title> 1584 <title>Overview</title>
@@ -2303,8 +1596,12 @@ void intel_crt_init(struct drm_device *dev)
2303!Edrivers/gpu/drm/drm_atomic_helper.c 1596!Edrivers/gpu/drm/drm_atomic_helper.c
2304 </sect2> 1597 </sect2>
2305 <sect2> 1598 <sect2>
2306 <title>Modeset Helper Functions Reference</title> 1599 <title>Modeset Helper Reference for Common Vtables</title>
2307!Iinclude/drm/drm_crtc_helper.h 1600!Iinclude/drm/drm_modeset_helper_vtables.h
1601!Pinclude/drm/drm_modeset_helper_vtables.h overview
1602 </sect2>
1603 <sect2>
1604 <title>Legacy CRTC/Modeset Helper Functions Reference</title>
2308!Edrivers/gpu/drm/drm_crtc_helper.c 1605!Edrivers/gpu/drm/drm_crtc_helper.c
2309!Pdrivers/gpu/drm/drm_crtc_helper.c overview 1606!Pdrivers/gpu/drm/drm_crtc_helper.c overview
2310 </sect2> 1607 </sect2>
@@ -4015,92 +3312,6 @@ int num_ioctls;</synopsis>
4015 <sect2> 3312 <sect2>
4016 <title>DPIO</title> 3313 <title>DPIO</title>
4017!Pdrivers/gpu/drm/i915/i915_reg.h DPIO 3314!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
4018 <table id="dpiox2">
4019 <title>Dual channel PHY (VLV/CHV/BXT)</title>
4020 <tgroup cols="8">
4021 <colspec colname="c0" />
4022 <colspec colname="c1" />
4023 <colspec colname="c2" />
4024 <colspec colname="c3" />
4025 <colspec colname="c4" />
4026 <colspec colname="c5" />
4027 <colspec colname="c6" />
4028 <colspec colname="c7" />
4029 <spanspec spanname="ch0" namest="c0" nameend="c3" />
4030 <spanspec spanname="ch1" namest="c4" nameend="c7" />
4031 <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
4032 <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
4033 <spanspec spanname="ch1pcs01" namest="c4" nameend="c5" />
4034 <spanspec spanname="ch1pcs23" namest="c6" nameend="c7" />
4035 <thead>
4036 <row>
4037 <entry spanname="ch0">CH0</entry>
4038 <entry spanname="ch1">CH1</entry>
4039 </row>
4040 </thead>
4041 <tbody valign="top" align="center">
4042 <row>
4043 <entry spanname="ch0">CMN/PLL/REF</entry>
4044 <entry spanname="ch1">CMN/PLL/REF</entry>
4045 </row>
4046 <row>
4047 <entry spanname="ch0pcs01">PCS01</entry>
4048 <entry spanname="ch0pcs23">PCS23</entry>
4049 <entry spanname="ch1pcs01">PCS01</entry>
4050 <entry spanname="ch1pcs23">PCS23</entry>
4051 </row>
4052 <row>
4053 <entry>TX0</entry>
4054 <entry>TX1</entry>
4055 <entry>TX2</entry>
4056 <entry>TX3</entry>
4057 <entry>TX0</entry>
4058 <entry>TX1</entry>
4059 <entry>TX2</entry>
4060 <entry>TX3</entry>
4061 </row>
4062 <row>
4063 <entry spanname="ch0">DDI0</entry>
4064 <entry spanname="ch1">DDI1</entry>
4065 </row>
4066 </tbody>
4067 </tgroup>
4068 </table>
4069 <table id="dpiox1">
4070 <title>Single channel PHY (CHV/BXT)</title>
4071 <tgroup cols="4">
4072 <colspec colname="c0" />
4073 <colspec colname="c1" />
4074 <colspec colname="c2" />
4075 <colspec colname="c3" />
4076 <spanspec spanname="ch0" namest="c0" nameend="c3" />
4077 <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
4078 <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
4079 <thead>
4080 <row>
4081 <entry spanname="ch0">CH0</entry>
4082 </row>
4083 </thead>
4084 <tbody valign="top" align="center">
4085 <row>
4086 <entry spanname="ch0">CMN/PLL/REF</entry>
4087 </row>
4088 <row>
4089 <entry spanname="ch0pcs01">PCS01</entry>
4090 <entry spanname="ch0pcs23">PCS23</entry>
4091 </row>
4092 <row>
4093 <entry>TX0</entry>
4094 <entry>TX1</entry>
4095 <entry>TX2</entry>
4096 <entry>TX3</entry>
4097 </row>
4098 <row>
4099 <entry spanname="ch0">DDI2</entry>
4100 </row>
4101 </tbody>
4102 </tgroup>
4103 </table>
4104 </sect2> 3315 </sect2>
4105 3316
4106 <sect2> 3317 <sect2>
@@ -4232,41 +3443,63 @@ int num_ioctls;</synopsis>
4232 3443
4233 <chapter id="modes_of_use"> 3444 <chapter id="modes_of_use">
4234 <title>Modes of Use</title> 3445 <title>Modes of Use</title>
4235 <sect1> 3446 <sect1>
4236 <title>Manual switching and manual power control</title> 3447 <title>Manual switching and manual power control</title>
4237!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control 3448!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
4238 </sect1> 3449 </sect1>
4239 <sect1> 3450 <sect1>
4240 <title>Driver power control</title> 3451 <title>Driver power control</title>
4241!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control 3452!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
4242 </sect1> 3453 </sect1>
4243 </chapter> 3454 </chapter>
4244 3455
4245 <chapter id="pubfunctions"> 3456 <chapter id="api">
4246 <title>Public functions</title> 3457 <title>API</title>
3458 <sect1>
3459 <title>Public functions</title>
4247!Edrivers/gpu/vga/vga_switcheroo.c 3460!Edrivers/gpu/vga/vga_switcheroo.c
4248 </chapter> 3461 </sect1>
4249 3462 <sect1>
4250 <chapter id="pubstructures"> 3463 <title>Public structures</title>
4251 <title>Public structures</title>
4252!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler 3464!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
4253!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops 3465!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
4254 </chapter> 3466 </sect1>
4255 3467 <sect1>
4256 <chapter id="pubconstants"> 3468 <title>Public constants</title>
4257 <title>Public constants</title>
4258!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id 3469!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
4259!Finclude/linux/vga_switcheroo.h vga_switcheroo_state 3470!Finclude/linux/vga_switcheroo.h vga_switcheroo_state
4260 </chapter> 3471 </sect1>
4261 3472 <sect1>
4262 <chapter id="privstructures"> 3473 <title>Private structures</title>
4263 <title>Private structures</title>
4264!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv 3474!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
4265!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client 3475!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
3476 </sect1>
3477 </chapter>
3478
3479 <chapter id="handlers">
3480 <title>Handlers</title>
3481 <sect1>
3482 <title>apple-gmux Handler</title>
3483!Pdrivers/platform/x86/apple-gmux.c Overview
3484!Pdrivers/platform/x86/apple-gmux.c Interrupt
3485 <sect2>
3486 <title>Graphics mux</title>
3487!Pdrivers/platform/x86/apple-gmux.c Graphics mux
3488 </sect2>
3489 <sect2>
3490 <title>Power control</title>
3491!Pdrivers/platform/x86/apple-gmux.c Power control
3492 </sect2>
3493 <sect2>
3494 <title>Backlight control</title>
3495!Pdrivers/platform/x86/apple-gmux.c Backlight control
3496 </sect2>
3497 </sect1>
4266 </chapter> 3498 </chapter>
4267 3499
4268!Cdrivers/gpu/vga/vga_switcheroo.c 3500!Cdrivers/gpu/vga/vga_switcheroo.c
4269!Cinclude/linux/vga_switcheroo.h 3501!Cinclude/linux/vga_switcheroo.h
3502!Cdrivers/platform/x86/apple-gmux.c
4270</part> 3503</part>
4271 3504
4272</book> 3505</book>
diff --git a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
new file mode 100644
index 000000000000..ed5e0a7894ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
@@ -0,0 +1,54 @@
1Etnaviv DRM master device
2=========================
3
4The Etnaviv DRM master device is a virtual device needed to list all
5Vivante GPU cores that comprise the GPU subsystem.
6
7Required properties:
8- compatible: Should be one of
9 "fsl,imx-gpu-subsystem"
10 "marvell,dove-gpu-subsystem"
11- cores: Should contain a list of phandles pointing to Vivante GPU devices
12
13example:
14
15gpu-subsystem {
16 compatible = "fsl,imx-gpu-subsystem";
17 cores = <&gpu_2d>, <&gpu_3d>;
18};
19
20
21Vivante GPU core devices
22========================
23
24Required properties:
25- compatible: Should be "vivante,gc"
26 A more specific compatible is not needed, as the cores contain chip
27 identification registers at fixed locations, which provide all the
28 necessary information to the driver.
29- reg: should be register base and length as documented in the
30 datasheet
31- interrupts: Should contain the core's interrupt line
32- clocks: should contain one clock for each entry in clock-names
33 see Documentation/devicetree/bindings/clock/clock-bindings.txt
34- clock-names:
35 - "bus": AXI/register clock
36 - "core": GPU core clock
37 - "shader": Shader clock (only required if GPU has feature PIPE_3D)
38
39Optional properties:
40- power-domains: a power domain consumer specifier according to
41 Documentation/devicetree/bindings/power/power_domain.txt
42
43example:
44
45gpu_3d: gpu@00130000 {
46 compatible = "vivante,gc";
47 reg = <0x00130000 0x4000>;
48 interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
49 clocks = <&clks IMX6QDL_CLK_GPU3D_AXI>,
50 <&clks IMX6QDL_CLK_GPU3D_CORE>,
51 <&clks IMX6QDL_CLK_GPU3D_SHADER>;
52 clock-names = "bus", "core", "shader";
53 power-domains = <&gpc 1>;
54};
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
index 64693f2ebc51..fe4a7a2dea9c 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
@@ -1,3 +1,20 @@
1Device-Tree bindings for Samsung Exynos Embedded DisplayPort Transmitter(eDP)
2
3DisplayPort is an industry standard to accommodate the growing broad adoption
4of digital display technology within the PC and CE industries.
5It consolidates the internal and external connection methods to reduce device
6complexity and cost. It also supports necessary features for important cross
7industry applications and provides performance scalability to enable the next
8generation of displays that feature higher color depths, refresh rates, and
9display resolutions.
10
11eDP (embedded display port) device is compliant with Embedded DisplayPort
12standard as follows,
13- DisplayPort standard 1.1a for Exynos5250 and Exynos5260.
14- DisplayPort standard 1.3 for Exynos5422s and Exynos5800.
15
16eDP resides between FIMD and panel or FIMD and bridge such as LVDS.
17
1The Exynos display port interface should be configured based on 18The Exynos display port interface should be configured based on
2the type of panel connected to it. 19the type of panel connected to it.
3 20
@@ -66,8 +83,15 @@ Optional properties for dp-controller:
66 Hotplug detect GPIO. 83 Hotplug detect GPIO.
67 Indicates which GPIO should be used for hotplug 84 Indicates which GPIO should be used for hotplug
68 detection 85 detection
69 -video interfaces: Device node can contain video interface port 86Video interfaces:
70 nodes according to [1]. 87 Device node can contain video interface port nodes according to [1].
88 The following are properties specific to those nodes:
89
90 endpoint node connected to bridge or panel node:
91 - remote-endpoint: specifies the endpoint in panel or bridge node.
92 This node is required in all kinds of exynos dp
93 to represent the connection between dp and bridge
94 or dp and panel.
71 95
72[1]: Documentation/devicetree/bindings/media/video-interfaces.txt 96[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
73 97
@@ -111,9 +135,18 @@ Board Specific portion:
111 }; 135 };
112 136
113 ports { 137 ports {
114 port@0 { 138 port {
115 dp_out: endpoint { 139 dp_out: endpoint {
116 remote-endpoint = <&bridge_in>; 140 remote-endpoint = <&dp_in>;
141 };
142 };
143 };
144
145 panel {
146 ...
147 port {
148 dp_in: endpoint {
149 remote-endpoint = <&dp_out>;
117 }; 150 };
118 }; 151 };
119 }; 152 };
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index f344b9e49198..e7423bea1424 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -14,17 +14,20 @@ Required properties:
14- clocks: device clocks 14- clocks: device clocks
15 See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details. 15 See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
16- clock-names: the following clocks are required: 16- clock-names: the following clocks are required:
17 * "mdp_core_clk"
18 * "iface_clk"
17 * "bus_clk" 19 * "bus_clk"
18 * "byte_clk"
19 * "core_clk"
20 * "core_mmss_clk" 20 * "core_mmss_clk"
21 * "iface_clk" 21 * "byte_clk"
22 * "mdp_core_clk"
23 * "pixel_clk" 22 * "pixel_clk"
23 * "core_clk"
24 For DSIv2, we need an additional clock:
25 * "src_clk"
24- vdd-supply: phandle to vdd regulator device node 26- vdd-supply: phandle to vdd regulator device node
25- vddio-supply: phandle to vdd-io regulator device node 27- vddio-supply: phandle to vdd-io regulator device node
26- vdda-supply: phandle to vdda regulator device node 28- vdda-supply: phandle to vdda regulator device node
27- qcom,dsi-phy: phandle to DSI PHY device node 29- qcom,dsi-phy: phandle to DSI PHY device node
30- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)
28 31
29Optional properties: 32Optional properties:
30- panel@0: Node of panel connected to this DSI controller. 33- panel@0: Node of panel connected to this DSI controller.
@@ -51,6 +54,7 @@ Required properties:
51 * "qcom,dsi-phy-28nm-hpm" 54 * "qcom,dsi-phy-28nm-hpm"
52 * "qcom,dsi-phy-28nm-lp" 55 * "qcom,dsi-phy-28nm-lp"
53 * "qcom,dsi-phy-20nm" 56 * "qcom,dsi-phy-20nm"
57 * "qcom,dsi-phy-28nm-8960"
54- reg: Physical base address and length of the registers of PLL, PHY and PHY 58- reg: Physical base address and length of the registers of PLL, PHY and PHY
55 regulator 59 regulator
56- reg-names: The names of register regions. The following regions are required: 60- reg-names: The names of register regions. The following regions are required:
diff --git a/Documentation/devicetree/bindings/display/msm/mdp.txt b/Documentation/devicetree/bindings/display/msm/mdp.txt
index 0833edaba4c3..a214f6cd0363 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp.txt
@@ -2,18 +2,28 @@ Qualcomm adreno/snapdragon display controller
2 2
3Required properties: 3Required properties:
4- compatible: 4- compatible:
5 * "qcom,mdp" - mdp4 5 * "qcom,mdp4" - mdp4
6 * "qcom,mdp5" - mdp5
6- reg: Physical base address and length of the controller's registers. 7- reg: Physical base address and length of the controller's registers.
7- interrupts: The interrupt signal from the display controller. 8- interrupts: The interrupt signal from the display controller.
8- connectors: array of phandles for output device(s) 9- connectors: array of phandles for output device(s)
9- clocks: device clocks 10- clocks: device clocks
10 See ../clocks/clock-bindings.txt for details. 11 See ../clocks/clock-bindings.txt for details.
11- clock-names: the following clocks are required: 12- clock-names: the following clocks are required.
12 * "core_clk" 13 For MDP4:
13 * "iface_clk" 14 * "core_clk"
14 * "src_clk" 15 * "iface_clk"
15 * "hdmi_clk" 16 * "lut_clk"
16 * "mpd_clk" 17 * "src_clk"
18 * "hdmi_clk"
19 * "mdp_clk"
20 For MDP5:
21 * "bus_clk"
22 * "iface_clk"
23 * "core_clk_src"
24 * "core_clk"
25 * "lut_clk" (some MDP5 versions may not need this)
26 * "vsync_clk"
17 27
18Optional properties: 28Optional properties:
19- gpus: phandle for gpu device 29- gpus: phandle for gpu device
@@ -26,7 +36,7 @@ Example:
26 ... 36 ...
27 37
28 mdp: qcom,mdp@5100000 { 38 mdp: qcom,mdp@5100000 {
29 compatible = "qcom,mdp"; 39 compatible = "qcom,mdp4";
30 reg = <0x05100000 0xf0000>; 40 reg = <0x05100000 0xf0000>;
31 interrupts = <GIC_SPI 75 0>; 41 interrupts = <GIC_SPI 75 0>;
32 connectors = <&hdmi>; 42 connectors = <&hdmi>;
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
new file mode 100644
index 000000000000..50be5e2438b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
@@ -0,0 +1,7 @@
1Boe Corporation 8.0" WUXGA TFT LCD panel
2
3Required properties:
4- compatible: should be "boe,tv080wum-nl0"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
new file mode 100644
index 000000000000..649744620ae1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
@@ -0,0 +1,7 @@
1Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
2
3Required properties:
4- compatible: should be "innolux,g121x1-l03"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
new file mode 100644
index 000000000000..a8e940fe731e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
@@ -0,0 +1,7 @@
1Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
2
3Required properties:
4- compatible: should be "kyo,tcg121xglp"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
new file mode 100644
index 000000000000..37dedf6a6702
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
@@ -0,0 +1,20 @@
1Panasonic 10" WUXGA TFT LCD panel
2
3Required properties:
4- compatible: should be "panasonic,vvx10f034n00"
5- reg: DSI virtual channel of the peripheral
6- power-supply: phandle of the regulator that provides the supply voltage
7
8Optional properties:
9- backlight: phandle of the backlight device attached to the panel
10
11Example:
12
13 mdss_dsi@fd922800 {
14 panel@0 {
15 compatible = "panasonic,vvx10f034n00";
16 reg = <0>;
17 power-supply = <&vreg_vsp>;
18 backlight = <&lp8566_wled>;
19 };
20 };
diff --git a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
new file mode 100644
index 000000000000..0fbdab89ac3d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
@@ -0,0 +1,7 @@
1QiaoDian XianShi Corporation 4.3" TFT LCD panel
2
3Required properties:
4- compatible: should be "qiaodian,qd43003c0-40"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt
new file mode 100644
index 000000000000..3770a111968b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt
@@ -0,0 +1,22 @@
1Sharp Microelectronics 4.3" qHD TFT LCD panel
2
3Required properties:
4- compatible: should be "sharp,ls043t1le01-qhd"
5- reg: DSI virtual channel of the peripheral
6- power-supply: phandle of the regulator that provides the supply voltage
7
8Optional properties:
9- backlight: phandle of the backlight device attached to the panel
10- reset-gpios: a GPIO spec for the reset pin
11
12Example:
13
14 mdss_dsi@fd922800 {
15 panel@0 {
16 compatible = "sharp,ls043t1le01-qhd";
17 reg = <0>;
18 avdd-supply = <&pm8941_l22>;
19 backlight = <&pm8941_wled>;
20 reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>;
21 };
22 };
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
new file mode 100644
index 000000000000..1753f0cc6fad
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
@@ -0,0 +1,60 @@
1Rockchip specific extensions to the Synopsys Designware MIPI DSI
2================================
3
4Required properties:
5- #address-cells: Should be <1>.
6- #size-cells: Should be <0>.
7- compatible: "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi".
8- reg: Represent the physical address range of the controller.
9- interrupts: Represent the controller's interrupt to the CPU(s).
10- clocks, clock-names: Phandles to the controller's pll reference
11 clock(ref) and APB clock(pclk), as described in [1].
12- rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
13- ports: contain a port node with endpoint definitions as defined in [2].
14 For vopb, set the reg = <0> and set the reg = <1> for vopl.
15
16[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
17[2] Documentation/devicetree/bindings/media/video-interfaces.txt
18
19Example:
20 mipi_dsi: mipi@ff960000 {
21 #address-cells = <1>;
22 #size-cells = <0>;
23 compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
24 reg = <0xff960000 0x4000>;
25 interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
26 clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>;
27 clock-names = "ref", "pclk";
28 rockchip,grf = <&grf>;
29 status = "okay";
30
31 ports {
32 #address-cells = <1>;
33 #size-cells = <0>;
34 reg = <1>;
35
36 mipi_in: port {
37 #address-cells = <1>;
38 #size-cells = <0>;
39 mipi_in_vopb: endpoint@0 {
40 reg = <0>;
41 remote-endpoint = <&vopb_out_mipi>;
42 };
43 mipi_in_vopl: endpoint@1 {
44 reg = <1>;
45 remote-endpoint = <&vopl_out_mipi>;
46 };
47 };
48 };
49
50 panel {
51 compatible ="boe,tv080wum-nl0";
52 reg = <0>;
53
54 enable-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>;
55 pinctrl-names = "default";
56 pinctrl-0 = <&lcd_en>;
57 backlight = <&backlight>;
58 status = "okay";
59 };
60 };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index d15351f2313d..5489b59e3d41 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -7,6 +7,7 @@ buffer to an external LCD interface.
7Required properties: 7Required properties:
8- compatible: value should be one of the following 8- compatible: value should be one of the following
9 "rockchip,rk3288-vop"; 9 "rockchip,rk3288-vop";
10 "rockchip,rk3036-vop";
10 11
11- interrupts: should contain a list of all VOP IP block interrupts in the 12- interrupts: should contain a list of all VOP IP block interrupts in the
12 order: VSYNC, LCD_SYSTEM. The interrupt specifier 13 order: VSYNC, LCD_SYSTEM. The interrupt specifier
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index d3d0a4fb1c73..079b42a81d7c 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -22,8 +22,7 @@ Required properties:
22Optional properties: 22Optional properties:
23- ti,hwmods: Name of the hwmods associated to the eDMA CC 23- ti,hwmods: Name of the hwmods associated to the eDMA CC
24- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow 24- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
25 these channels will be SW triggered channels. The list must 25 these channels will be SW triggered channels. See example.
26 contain 16 bits numbers, see example.
27- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by 26- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
28 the driver, they are allocated to be used by for example the 27 the driver, they are allocated to be used by for example the
29 DSP. See example. 28 DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
56 ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; 55 ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
57 56
58 /* Channel 20 and 21 is allocated for memcpy */ 57 /* Channel 20 and 21 is allocated for memcpy */
59 ti,edma-memcpy-channels = /bits/ 16 <20 21>; 58 ti,edma-memcpy-channels = <20 21>;
60 /* The following PaRAM slots are reserved: 35-45 and 100-110 */ 59 /* The following PaRAM slots are reserved: 35-44 and 100-109 */
61 ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>, 60 ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
62 /bits/ 16 <100 10>;
63}; 61};
64 62
65edma_tptc0: tptc@49800000 { 63edma_tptc0: tptc@49800000 {
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
index f2455c50533d..120bc4971cf3 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
@@ -11,6 +11,10 @@ Required properties:
11 0 = active high 11 0 = active high
12 1 = active low 12 1 = active low
13 13
14Optional properties:
15- little-endian : GPIO registers are used as little endian. If not
16 present registers are used as big endian by default.
17
14Example: 18Example:
15 19
16gpio0: gpio@1100 { 20gpio0: gpio@1100 {
diff --git a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
index b9c32f6fd687..4357e498ef04 100644
--- a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
+++ b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
@@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
12Required subnode-properties: 12Required subnode-properties:
13 - label: Descriptive name of the key. 13 - label: Descriptive name of the key.
14 - linux,code: Keycode to emit. 14 - linux,code: Keycode to emit.
15 - channel: Channel this key is attached to, mut be 0 or 1. 15 - channel: Channel this key is attached to, must be 0 or 1.
16 - voltage: Voltage in µV at lradc input when this key is pressed. 16 - voltage: Voltage in µV at lradc input when this key is pressed.
17 17
18Example: 18Example:
diff --git a/Documentation/devicetree/bindings/media/exynos5-gsc.txt b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
index 0604d42f38d1..5fe9372abb37 100644
--- a/Documentation/devicetree/bindings/media/exynos5-gsc.txt
+++ b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
@@ -7,6 +7,10 @@ Required properties:
7- reg: should contain G-Scaler physical address location and length. 7- reg: should contain G-Scaler physical address location and length.
8- interrupts: should contain G-Scaler interrupt number 8- interrupts: should contain G-Scaler interrupt number
9 9
10Optional properties:
11- samsung,sysreg: handle to syscon used to control the system registers to
12 set writeback input and destination
13
10Example: 14Example:
11 15
12gsc_0: gsc@0x13e00000 { 16gsc_0: gsc@0x13e00000 {
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index f1e2a02381a4..1c63e40659fc 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such
6as RedBoot. 6as RedBoot.
7 7
8The partition table should be a subnode of the mtd node and should be named 8The partition table should be a subnode of the mtd node and should be named
9'partitions'. Partitions are defined in subnodes of the partitions node. 9'partitions'. This node should have the following property:
10- compatible : (required) must be "fixed-partitions"
11Partitions are then defined in subnodes of the partitions node.
10 12
11For backwards compatibility partitions as direct subnodes of the mtd device are 13For backwards compatibility partitions as direct subnodes of the mtd device are
12supported. This use is discouraged. 14supported. This use is discouraged.
@@ -36,6 +38,7 @@ Examples:
36 38
37flash@0 { 39flash@0 {
38 partitions { 40 partitions {
41 compatible = "fixed-partitions";
39 #address-cells = <1>; 42 #address-cells = <1>;
40 #size-cells = <1>; 43 #size-cells = <1>;
41 44
@@ -53,6 +56,7 @@ flash@0 {
53 56
54flash@1 { 57flash@1 {
55 partitions { 58 partitions {
59 compatible = "fixed-partitions";
56 #address-cells = <1>; 60 #address-cells = <1>;
57 #size-cells = <2>; 61 #size-cells = <2>;
58 62
@@ -66,6 +70,7 @@ flash@1 {
66 70
67flash@2 { 71flash@2 {
68 partitions { 72 partitions {
73 compatible = "fixed-partitions";
69 #address-cells = <2>; 74 #address-cells = <2>;
70 #size-cells = <2>; 75 #size-cells = <2>;
71 76
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 55df1d444e9f..b123731b2dca 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -33,6 +33,7 @@ auo AU Optronics Corporation
33avago Avago Technologies 33avago Avago Technologies
34avic Shanghai AVIC Optoelectronics Co., Ltd. 34avic Shanghai AVIC Optoelectronics Co., Ltd.
35axis Axis Communications AB 35axis Axis Communications AB
36boe BOE Technology Group Co., Ltd.
36bosch Bosch Sensortec GmbH 37bosch Bosch Sensortec GmbH
37boundary Boundary Devices Inc. 38boundary Boundary Devices Inc.
38brcm Broadcom Corporation 39brcm Broadcom Corporation
@@ -123,6 +124,7 @@ jedec JEDEC Solid State Technology Association
123karo Ka-Ro electronics GmbH 124karo Ka-Ro electronics GmbH
124keymile Keymile GmbH 125keymile Keymile GmbH
125kinetic Kinetic Technologies 126kinetic Kinetic Technologies
127kyo Kyocera Corporation
126lacie LaCie 128lacie LaCie
127lantiq Lantiq Semiconductor 129lantiq Lantiq Semiconductor
128lenovo Lenovo Group Ltd. 130lenovo Lenovo Group Ltd.
@@ -180,6 +182,7 @@ qca Qualcomm Atheros, Inc.
180qcom Qualcomm Technologies, Inc 182qcom Qualcomm Technologies, Inc
181qemu QEMU, a generic and open source machine emulator and virtualizer 183qemu QEMU, a generic and open source machine emulator and virtualizer
182qi Qi Hardware 184qi Qi Hardware
185qiaodian QiaoDian XianShi Corporation
183qnap QNAP Systems, Inc. 186qnap QNAP Systems, Inc.
184radxa Radxa 187radxa Radxa
185raidsonic RaidSonic Technology GmbH 188raidsonic RaidSonic Technology GmbH
@@ -238,6 +241,7 @@ v3 V3 Semiconductor
238variscite Variscite Ltd. 241variscite Variscite Ltd.
239via VIA Technologies, Inc. 242via VIA Technologies, Inc.
240virtio Virtual I/O Device Specification, developed by the OASIS consortium 243virtio Virtual I/O Device Specification, developed by the OASIS consortium
244vivante Vivante Corporation
241voipac Voipac Technologies s.r.o. 245voipac Voipac Technologies s.r.o.
242wexler Wexler 246wexler Wexler
243winbond Winbond Electronics corp. 247winbond Winbond Electronics corp.
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index f862cf3aff34..42ddbd4b52a9 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
181If an issue is identified with the released source code on the supported 181If an issue is identified with the released source code on the supported
182kernel with a supported adapter, email the specific information related to the 182kernel with a supported adapter, email the specific information related to the
183issue to e1000-devel@lists.sourceforge.net. 183issue to e1000-devel@lists.sourceforge.net.
184
185
186License
187=======
188
189This software program is released under the terms of a license agreement
190between you ('Licensee') and Intel. Do not use or load this software or any
191associated materials (collectively, the 'Software') until you have carefully
192read the full terms and conditions of the file COPYING located in this software
193package. By loading or using the Software, you agree to the terms of this
194Agreement. If you do not agree with the terms of this Agreement, do not install
195or use the Software.
196
197* Other names and brands may be claimed as the property of others.
diff --git a/MAINTAINERS b/MAINTAINERS
index 69c8a9c3289a..32eda9d0be0c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2975,6 +2975,7 @@ F: kernel/cpuset.c
2975CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG) 2975CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
2976M: Johannes Weiner <hannes@cmpxchg.org> 2976M: Johannes Weiner <hannes@cmpxchg.org>
2977M: Michal Hocko <mhocko@kernel.org> 2977M: Michal Hocko <mhocko@kernel.org>
2978M: Vladimir Davydov <vdavydov@virtuozzo.com>
2978L: cgroups@vger.kernel.org 2979L: cgroups@vger.kernel.org
2979L: linux-mm@kvack.org 2980L: linux-mm@kvack.org
2980S: Maintained 2981S: Maintained
@@ -3743,6 +3744,15 @@ S: Maintained
3743F: drivers/gpu/drm/sti 3744F: drivers/gpu/drm/sti
3744F: Documentation/devicetree/bindings/display/st,stih4xx.txt 3745F: Documentation/devicetree/bindings/display/st,stih4xx.txt
3745 3746
3747DRM DRIVERS FOR VIVANTE GPU IP
3748M: Lucas Stach <l.stach@pengutronix.de>
3749R: Russell King <linux+etnaviv@arm.linux.org.uk>
3750R: Christian Gmeiner <christian.gmeiner@gmail.com>
3751L: dri-devel@lists.freedesktop.org
3752S: Maintained
3753F: drivers/gpu/drm/etnaviv
3754F: Documentation/devicetree/bindings/display/etnaviv
3755
3746DSBR100 USB FM RADIO DRIVER 3756DSBR100 USB FM RADIO DRIVER
3747M: Alexey Klimov <klimov.linux@gmail.com> 3757M: Alexey Klimov <klimov.linux@gmail.com>
3748L: linux-media@vger.kernel.org 3758L: linux-media@vger.kernel.org
@@ -5577,7 +5587,7 @@ R: Jesse Brandeburg <jesse.brandeburg@intel.com>
5577R: Shannon Nelson <shannon.nelson@intel.com> 5587R: Shannon Nelson <shannon.nelson@intel.com>
5578R: Carolyn Wyborny <carolyn.wyborny@intel.com> 5588R: Carolyn Wyborny <carolyn.wyborny@intel.com>
5579R: Don Skidmore <donald.c.skidmore@intel.com> 5589R: Don Skidmore <donald.c.skidmore@intel.com>
5580R: Matthew Vick <matthew.vick@intel.com> 5590R: Bruce Allan <bruce.w.allan@intel.com>
5581R: John Ronciak <john.ronciak@intel.com> 5591R: John Ronciak <john.ronciak@intel.com>
5582R: Mitch Williams <mitch.a.williams@intel.com> 5592R: Mitch Williams <mitch.a.williams@intel.com>
5583L: intel-wired-lan@lists.osuosl.org 5593L: intel-wired-lan@lists.osuosl.org
@@ -8286,7 +8296,7 @@ F: include/linux/delayacct.h
8286F: kernel/delayacct.c 8296F: kernel/delayacct.c
8287 8297
8288PERFORMANCE EVENTS SUBSYSTEM 8298PERFORMANCE EVENTS SUBSYSTEM
8289M: Peter Zijlstra <a.p.zijlstra@chello.nl> 8299M: Peter Zijlstra <peterz@infradead.org>
8290M: Ingo Molnar <mingo@redhat.com> 8300M: Ingo Molnar <mingo@redhat.com>
8291M: Arnaldo Carvalho de Melo <acme@kernel.org> 8301M: Arnaldo Carvalho de Melo <acme@kernel.org>
8292L: linux-kernel@vger.kernel.org 8302L: linux-kernel@vger.kernel.org
@@ -8379,6 +8389,14 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
8379S: Maintained 8389S: Maintained
8380F: drivers/pinctrl/samsung/ 8390F: drivers/pinctrl/samsung/
8381 8391
8392PIN CONTROLLER - SINGLE
8393M: Tony Lindgren <tony@atomide.com>
8394M: Haojian Zhuang <haojian.zhuang@linaro.org>
8395L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8396L: linux-omap@vger.kernel.org
8397S: Maintained
8398F: drivers/pinctrl/pinctrl-single.c
8399
8382PIN CONTROLLER - ST SPEAR 8400PIN CONTROLLER - ST SPEAR
8383M: Viresh Kumar <vireshk@kernel.org> 8401M: Viresh Kumar <vireshk@kernel.org>
8384L: spear-devel@list.st.com 8402L: spear-devel@list.st.com
@@ -8945,6 +8963,13 @@ F: drivers/rpmsg/
8945F: Documentation/rpmsg.txt 8963F: Documentation/rpmsg.txt
8946F: include/linux/rpmsg.h 8964F: include/linux/rpmsg.h
8947 8965
8966RENESAS ETHERNET DRIVERS
8967R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
8968L: netdev@vger.kernel.org
8969L: linux-sh@vger.kernel.org
8970F: drivers/net/ethernet/renesas/
8971F: include/linux/sh_eth.h
8972
8948RESET CONTROLLER FRAMEWORK 8973RESET CONTROLLER FRAMEWORK
8949M: Philipp Zabel <p.zabel@pengutronix.de> 8974M: Philipp Zabel <p.zabel@pengutronix.de>
8950S: Maintained 8975S: Maintained
diff --git a/Makefile b/Makefile
index d644f6e92cf6..4e2b18d56091 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 4 2PATCHLEVEL = 4
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc6
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2c2ac3f3ff80..6312f607932f 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -445,6 +445,7 @@ config LINUX_LINK_BASE
445 However some customers have peripherals mapped at this addr, so 445 However some customers have peripherals mapped at this addr, so
446 Linux needs to be scooted a bit. 446 Linux needs to be scooted a bit.
447 If you don't know what the above means, leave this setting alone. 447 If you don't know what the above means, leave this setting alone.
448 This needs to match memory start address specified in Device Tree
448 449
449config HIGHMEM 450config HIGHMEM
450 bool "High Memory Support" 451 bool "High Memory Support"
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index f3db32154973..44a578c10732 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -46,6 +46,7 @@
46 snps,pbl = < 32 >; 46 snps,pbl = < 32 >;
47 clocks = <&apbclk>; 47 clocks = <&apbclk>;
48 clock-names = "stmmaceth"; 48 clock-names = "stmmaceth";
49 max-speed = <100>;
49 }; 50 };
50 51
51 ehci@0x40000 { 52 ehci@0x40000 {
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index b0eb0e7fe21d..fc81879bc1f5 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -17,7 +17,8 @@
17 17
18 memory { 18 memory {
19 device_type = "memory"; 19 device_type = "memory";
20 reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */ 20 /* CONFIG_LINUX_LINK_BASE needs to match low mem start */
21 reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */
21 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ 22 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
22 }; 23 };
23 24
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 6ff657a904b6..c28e6c347b49 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -23,7 +23,7 @@
23 * @dt_compat: Array of device tree 'compatible' strings 23 * @dt_compat: Array of device tree 'compatible' strings
24 * (XXX: although only 1st entry is looked at) 24 * (XXX: although only 1st entry is looked at)
25 * @init_early: Very early callback [called from setup_arch()] 25 * @init_early: Very early callback [called from setup_arch()]
26 * @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP) 26 * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP)
27 * [(M):init_IRQ(), (o):start_kernel_secondary()] 27 * [(M):init_IRQ(), (o):start_kernel_secondary()]
28 * @init_machine: arch initcall level callback (e.g. populate static 28 * @init_machine: arch initcall level callback (e.g. populate static
29 * platform devices or parse Devicetree) 29 * platform devices or parse Devicetree)
@@ -35,7 +35,7 @@ struct machine_desc {
35 const char **dt_compat; 35 const char **dt_compat;
36 void (*init_early)(void); 36 void (*init_early)(void);
37#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
38 void (*init_cpu_smp)(unsigned int); 38 void (*init_per_cpu)(unsigned int);
39#endif 39#endif
40 void (*init_machine)(void); 40 void (*init_machine)(void);
41 void (*init_late)(void); 41 void (*init_late)(void);
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 133c867d15af..991380438d6b 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
48 * @init_early_smp: A SMP specific h/w block can init itself 48 * @init_early_smp: A SMP specific h/w block can init itself
49 * Could be common across platforms so not covered by 49 * Could be common across platforms so not covered by
50 * mach_desc->init_early() 50 * mach_desc->init_early()
51 * @init_irq_cpu: Called for each core so SMP h/w block driver can do 51 * @init_per_cpu: Called for each core so SMP h/w block driver can do
52 * any needed setup per cpu (e.g. IPI request) 52 * any needed setup per cpu (e.g. IPI request)
53 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) 53 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
54 * @ipi_send: To send IPI to a @cpu 54 * @ipi_send: To send IPI to a @cpu
@@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
57struct plat_smp_ops { 57struct plat_smp_ops {
58 const char *info; 58 const char *info;
59 void (*init_early_smp)(void); 59 void (*init_early_smp)(void);
60 void (*init_irq_cpu)(int cpu); 60 void (*init_per_cpu)(int cpu);
61 void (*cpu_kick)(int cpu, unsigned long pc); 61 void (*cpu_kick)(int cpu, unsigned long pc);
62 void (*ipi_send)(int cpu); 62 void (*ipi_send)(int cpu);
63 void (*ipi_clear)(int irq); 63 void (*ipi_clear)(int irq);
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
index 7ca628b6ee2a..c11a25bb8158 100644
--- a/arch/arc/include/asm/unwind.h
+++ b/arch/arc/include/asm/unwind.h
@@ -112,7 +112,6 @@ struct unwind_frame_info {
112 112
113extern int arc_unwind(struct unwind_frame_info *frame); 113extern int arc_unwind(struct unwind_frame_info *frame);
114extern void arc_unwind_init(void); 114extern void arc_unwind_init(void);
115extern void arc_unwind_setup(void);
116extern void *unwind_add_table(struct module *module, const void *table_start, 115extern void *unwind_add_table(struct module *module, const void *table_start,
117 unsigned long table_size); 116 unsigned long table_size);
118extern void unwind_remove_table(void *handle, int init_only); 117extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
152{ 151{
153} 152}
154 153
155static inline void arc_unwind_setup(void)
156{
157}
158#define unwind_add_table(a, b, c) 154#define unwind_add_table(a, b, c)
159#define unwind_remove_table(a, b) 155#define unwind_remove_table(a, b)
160 156
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 26c156827479..0394f9f61b46 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
106static int arcv2_irq_map(struct irq_domain *d, unsigned int irq, 106static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
107 irq_hw_number_t hw) 107 irq_hw_number_t hw)
108{ 108{
109 if (irq == TIMER0_IRQ || irq == IPI_IRQ) 109 /*
110 * core intc IRQs [16, 23]:
111 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
112 */
113 if (hw < 24) {
114 /*
115 * A subsequent request_percpu_irq() fails if percpu_devid is
116 * not set. That in turns sets NOAUTOEN, meaning each core needs
117 * to call enable_percpu_irq()
118 */
119 irq_set_percpu_devid(irq);
110 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq); 120 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
111 else 121 } else {
112 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq); 122 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
123 }
113 124
114 return 0; 125 return 0;
115} 126}
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2ee226546c6a..ba17f85285cf 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -29,11 +29,11 @@ void __init init_IRQ(void)
29 29
30#ifdef CONFIG_SMP 30#ifdef CONFIG_SMP
31 /* a SMP H/w block could do IPI IRQ request here */ 31 /* a SMP H/w block could do IPI IRQ request here */
32 if (plat_smp_ops.init_irq_cpu) 32 if (plat_smp_ops.init_per_cpu)
33 plat_smp_ops.init_irq_cpu(smp_processor_id()); 33 plat_smp_ops.init_per_cpu(smp_processor_id());
34 34
35 if (machine_desc->init_cpu_smp) 35 if (machine_desc->init_per_cpu)
36 machine_desc->init_cpu_smp(smp_processor_id()); 36 machine_desc->init_per_cpu(smp_processor_id());
37#endif 37#endif
38} 38}
39 39
@@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
51 set_irq_regs(old_regs); 51 set_irq_regs(old_regs);
52} 52}
53 53
54/*
55 * API called for requesting percpu interrupts - called by each CPU
56 * - For boot CPU, actually request the IRQ with genirq core + enables
57 * - For subsequent callers only enable called locally
58 *
59 * Relies on being called by boot cpu first (i.e. request called ahead) of
60 * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
61 * which are guaranteed to be setup on boot core first.
62 * Late probed peripherals such as perf can't use this as there no guarantee
63 * of being called on boot CPU first.
64 */
65
54void arc_request_percpu_irq(int irq, int cpu, 66void arc_request_percpu_irq(int irq, int cpu,
55 irqreturn_t (*isr)(int irq, void *dev), 67 irqreturn_t (*isr)(int irq, void *dev),
56 const char *irq_nm, 68 const char *irq_nm,
@@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
60 if (!cpu) { 72 if (!cpu) {
61 int rc; 73 int rc;
62 74
75#ifdef CONFIG_ISA_ARCOMPACT
63 /* 76 /*
64 * These 2 calls are essential to making percpu IRQ APIs work 77 * A subsequent request_percpu_irq() fails if percpu_devid is
65 * Ideally these details could be hidden in irq chip map function 78 * not set. That in turns sets NOAUTOEN, meaning each core needs
66 * but the issue is IPIs IRQs being static (non-DT) and platform 79 * to call enable_percpu_irq()
67 * specific, so we can't identify them there. 80 *
81 * For ARCv2, this is done in irq map function since we know
82 * which irqs are strictly per cpu
68 */ 83 */
69 irq_set_percpu_devid(irq); 84 irq_set_percpu_devid(irq);
70 irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */ 85#endif
71 86
72 rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); 87 rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
73 if (rc) 88 if (rc)
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 74a9b074ac3e..bd237acdf4f2 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
132struct plat_smp_ops plat_smp_ops = { 132struct plat_smp_ops plat_smp_ops = {
133 .info = smp_cpuinfo_buf, 133 .info = smp_cpuinfo_buf,
134 .init_early_smp = mcip_probe_n_setup, 134 .init_early_smp = mcip_probe_n_setup,
135 .init_irq_cpu = mcip_setup_per_cpu, 135 .init_per_cpu = mcip_setup_per_cpu,
136 .ipi_send = mcip_ipi_send, 136 .ipi_send = mcip_ipi_send,
137 .ipi_clear = mcip_ipi_clear, 137 .ipi_clear = mcip_ipi_clear,
138}; 138};
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 0c08bb1ce15a..8b134cfe5e1f 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
428 428
429#endif /* CONFIG_ISA_ARCV2 */ 429#endif /* CONFIG_ISA_ARCV2 */
430 430
431void arc_cpu_pmu_irq_init(void) 431static void arc_cpu_pmu_irq_init(void *data)
432{ 432{
433 struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); 433 int irq = *(int *)data;
434 434
435 arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr, 435 enable_percpu_irq(irq, IRQ_TYPE_NONE);
436 "ARC perf counters", pmu_cpu);
437 436
438 /* Clear all pending interrupt flags */ 437 /* Clear all pending interrupt flags */
439 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); 438 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
515 514
516 if (has_interrupts) { 515 if (has_interrupts) {
517 int irq = platform_get_irq(pdev, 0); 516 int irq = platform_get_irq(pdev, 0);
518 unsigned long flags;
519 517
520 if (irq < 0) { 518 if (irq < 0) {
521 pr_err("Cannot get IRQ number for the platform\n"); 519 pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
524 522
525 arc_pmu->irq = irq; 523 arc_pmu->irq = irq;
526 524
527 /* 525 /* intc map function ensures irq_set_percpu_devid() called */
528 * arc_cpu_pmu_irq_init() needs to be called on all cores for 526 request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
529 * their respective local PMU. 527 this_cpu_ptr(&arc_pmu_cpu));
530 * However we use opencoded on_each_cpu() to ensure it is called 528
531 * on core0 first, so that arc_request_percpu_irq() sets up 529 on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
532 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable 530
533 * perf IRQ on non master cores.
534 * see arc_request_percpu_irq()
535 */
536 preempt_disable();
537 local_irq_save(flags);
538 arc_cpu_pmu_irq_init();
539 local_irq_restore(flags);
540 smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
541 preempt_enable();
542
543 /* Clean all pending interrupt flags */
544 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
545 } else 531 } else
546 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 532 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
547 533
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index c33e77c0ad3e..e1b87444ea9a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
429#endif 429#endif
430 430
431 arc_unwind_init(); 431 arc_unwind_init();
432 arc_unwind_setup();
433} 432}
434 433
435static int __init customize_machine(void) 434static int __init customize_machine(void)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 580587805fa3..ef6e9e15b82a 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -132,11 +132,11 @@ void start_kernel_secondary(void)
132 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); 132 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
133 133
134 /* Some SMP H/w setup - for each cpu */ 134 /* Some SMP H/w setup - for each cpu */
135 if (plat_smp_ops.init_irq_cpu) 135 if (plat_smp_ops.init_per_cpu)
136 plat_smp_ops.init_irq_cpu(cpu); 136 plat_smp_ops.init_per_cpu(cpu);
137 137
138 if (machine_desc->init_cpu_smp) 138 if (machine_desc->init_per_cpu)
139 machine_desc->init_cpu_smp(cpu); 139 machine_desc->init_per_cpu(cpu);
140 140
141 arc_local_timer_setup(); 141 arc_local_timer_setup();
142 142
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 7352475451f6..cf2828ab0905 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
170 170
171static unsigned long read_pointer(const u8 **pLoc, 171static unsigned long read_pointer(const u8 **pLoc,
172 const void *end, signed ptrType); 172 const void *end, signed ptrType);
173static void init_unwind_hdr(struct unwind_table *table,
174 void *(*alloc) (unsigned long));
175
176/*
177 * wrappers for header alloc (vs. calling one vs. other at call site)
178 * to elide section mismatches warnings
179 */
180static void *__init unw_hdr_alloc_early(unsigned long sz)
181{
182 return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
183 MAX_DMA_ADDRESS);
184}
185
186static void *unw_hdr_alloc(unsigned long sz)
187{
188 return kmalloc(sz, GFP_KERNEL);
189}
173 190
174static void init_unwind_table(struct unwind_table *table, const char *name, 191static void init_unwind_table(struct unwind_table *table, const char *name,
175 const void *core_start, unsigned long core_size, 192 const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
209 __start_unwind, __end_unwind - __start_unwind, 226 __start_unwind, __end_unwind - __start_unwind,
210 NULL, 0); 227 NULL, 0);
211 /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/ 228 /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
229
230 init_unwind_hdr(&root_table, unw_hdr_alloc_early);
212} 231}
213 232
214static const u32 bad_cie, not_fde; 233static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
241 e2->fde = v; 260 e2->fde = v;
242} 261}
243 262
244static void __init setup_unwind_table(struct unwind_table *table, 263static void init_unwind_hdr(struct unwind_table *table,
245 void *(*alloc) (unsigned long)) 264 void *(*alloc) (unsigned long))
246{ 265{
247 const u8 *ptr; 266 const u8 *ptr;
248 unsigned long tableSize = table->size, hdrSize; 267 unsigned long tableSize = table->size, hdrSize;
@@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table,
274 const u32 *cie = cie_for_fde(fde, table); 293 const u32 *cie = cie_for_fde(fde, table);
275 signed ptrType; 294 signed ptrType;
276 295
277 if (cie == &not_fde) 296 if (cie == &not_fde) /* only process FDE here */
278 continue; 297 continue;
279 if (cie == NULL || cie == &bad_cie) 298 if (cie == NULL || cie == &bad_cie)
280 return; 299 continue; /* say FDE->CIE.version != 1 */
281 ptrType = fde_pointer_type(cie); 300 ptrType = fde_pointer_type(cie);
282 if (ptrType < 0) 301 if (ptrType < 0)
283 return; 302 continue;
284 303
285 ptr = (const u8 *)(fde + 2); 304 ptr = (const u8 *)(fde + 2);
286 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, 305 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table,
300 319
301 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) 320 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
302 + 2 * n * sizeof(unsigned long); 321 + 2 * n * sizeof(unsigned long);
322
303 header = alloc(hdrSize); 323 header = alloc(hdrSize);
304 if (!header) 324 if (!header)
305 return; 325 return;
326
306 header->version = 1; 327 header->version = 1;
307 header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; 328 header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
308 header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4; 329 header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
322 343
323 if (fde[1] == 0xffffffff) 344 if (fde[1] == 0xffffffff)
324 continue; /* this is a CIE */ 345 continue; /* this is a CIE */
346
347 if (*(u8 *)(cie + 2) != 1)
348 continue; /* FDE->CIE.version not supported */
349
325 ptr = (const u8 *)(fde + 2); 350 ptr = (const u8 *)(fde + 2);
326 header->table[n].start = read_pointer(&ptr, 351 header->table[n].start = read_pointer(&ptr,
327 (const u8 *)(fde + 1) + 352 (const u8 *)(fde + 1) +
@@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table,
342 table->header = (const void *)header; 367 table->header = (const void *)header;
343} 368}
344 369
345static void *__init balloc(unsigned long sz)
346{
347 return __alloc_bootmem_nopanic(sz,
348 sizeof(unsigned int),
349 __pa(MAX_DMA_ADDRESS));
350}
351
352void __init arc_unwind_setup(void)
353{
354 setup_unwind_table(&root_table, balloc);
355}
356
357#ifdef CONFIG_MODULES 370#ifdef CONFIG_MODULES
358 371
359static struct unwind_table *last_table; 372static struct unwind_table *last_table;
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
377 table_start, table_size, 390 table_start, table_size,
378 NULL, 0); 391 NULL, 0);
379 392
393 init_unwind_hdr(table, unw_hdr_alloc);
394
380#ifdef UNWIND_DEBUG 395#ifdef UNWIND_DEBUG
381 unw_debug("Table added for [%s] %lx %lx\n", 396 unw_debug("Table added for [%s] %lx %lx\n",
382 module->name, table->core.pc, table->core.range); 397 module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
439 info.init_only = init_only; 454 info.init_only = init_only;
440 455
441 unlink_table(&info); /* XXX: SMP */ 456 unlink_table(&info); /* XXX: SMP */
457 kfree(table->header);
442 kfree(table); 458 kfree(table);
443} 459}
444 460
@@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
507 523
508 if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) 524 if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
509 || (*cie & (sizeof(*cie) - 1)) 525 || (*cie & (sizeof(*cie) - 1))
510 || (cie[1] != 0xffffffff)) 526 || (cie[1] != 0xffffffff)
527 || ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */
511 return NULL; /* this is not a (valid) CIE */ 528 return NULL; /* this is not a (valid) CIE */
512 return cie; 529 return cie;
513} 530}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index a9305b5a2cd4..7d2c4fbf4f22 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
51 int in_use = 0; 51 int in_use = 0;
52 52
53 if (!low_mem_sz) { 53 if (!low_mem_sz) {
54 BUG_ON(base != low_mem_start); 54 if (base != low_mem_start)
55 panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
56
55 low_mem_sz = size; 57 low_mem_sz = size;
56 in_use = 1; 58 in_use = 1;
57 } else { 59 } else {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d83ff9c9701e..de8791a4d131 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -74,7 +74,7 @@
74 reg = <0x48240200 0x100>; 74 reg = <0x48240200 0x100>;
75 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 75 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
76 interrupt-parent = <&gic>; 76 interrupt-parent = <&gic>;
77 clocks = <&dpll_mpu_m2_ck>; 77 clocks = <&mpu_periphclk>;
78 }; 78 };
79 79
80 local_timer: timer@48240600 { 80 local_timer: timer@48240600 {
@@ -82,7 +82,7 @@
82 reg = <0x48240600 0x100>; 82 reg = <0x48240600 0x100>;
83 interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>; 83 interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
84 interrupt-parent = <&gic>; 84 interrupt-parent = <&gic>;
85 clocks = <&dpll_mpu_m2_ck>; 85 clocks = <&mpu_periphclk>;
86 }; 86 };
87 87
88 l2-cache-controller@48242000 { 88 l2-cache-controller@48242000 {
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index cc88728d751d..a38af2bfbfcf 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -259,6 +259,14 @@
259 ti,invert-autoidle-bit; 259 ti,invert-autoidle-bit;
260 }; 260 };
261 261
262 mpu_periphclk: mpu_periphclk {
263 #clock-cells = <0>;
264 compatible = "fixed-factor-clock";
265 clocks = <&dpll_mpu_m2_ck>;
266 clock-mult = <1>;
267 clock-div = <2>;
268 };
269
262 dpll_ddr_ck: dpll_ddr_ck { 270 dpll_ddr_ck: dpll_ddr_ck {
263 #clock-cells = <0>; 271 #clock-cells = <0>;
264 compatible = "ti,am3-dpll-clock"; 272 compatible = "ti,am3-dpll-clock";
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index ad6de73ed5a5..e74df327cdd3 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -184,6 +184,7 @@
184 regulator-name = "VDD_SDHC_1V8"; 184 regulator-name = "VDD_SDHC_1V8";
185 regulator-min-microvolt = <1800000>; 185 regulator-min-microvolt = <1800000>;
186 regulator-max-microvolt = <1800000>; 186 regulator-max-microvolt = <1800000>;
187 regulator-always-on;
187 }; 188 };
188 }; 189 };
189 }; 190 };
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 8ea177f375dd..fb1da99996ea 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -118,7 +118,8 @@
118 sdhci0: sdhci@ab0000 { 118 sdhci0: sdhci@ab0000 {
119 compatible = "mrvl,pxav3-mmc"; 119 compatible = "mrvl,pxav3-mmc";
120 reg = <0xab0000 0x200>; 120 reg = <0xab0000 0x200>;
121 clocks = <&chip_clk CLKID_SDIO1XIN>; 121 clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
122 clock-names = "io", "core";
122 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; 123 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
123 status = "disabled"; 124 status = "disabled";
124 }; 125 };
@@ -126,7 +127,8 @@
126 sdhci1: sdhci@ab0800 { 127 sdhci1: sdhci@ab0800 {
127 compatible = "mrvl,pxav3-mmc"; 128 compatible = "mrvl,pxav3-mmc";
128 reg = <0xab0800 0x200>; 129 reg = <0xab0800 0x200>;
129 clocks = <&chip_clk CLKID_SDIO1XIN>; 130 clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
131 clock-names = "io", "core";
130 interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; 132 interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
131 status = "disabled"; 133 status = "disabled";
132 }; 134 };
@@ -135,7 +137,7 @@
135 compatible = "mrvl,pxav3-mmc"; 137 compatible = "mrvl,pxav3-mmc";
136 reg = <0xab1000 0x200>; 138 reg = <0xab1000 0x200>;
137 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; 139 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
138 clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>; 140 clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
139 clock-names = "io", "core"; 141 clock-names = "io", "core";
140 status = "disabled"; 142 status = "disabled";
141 }; 143 };
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c99cfa1a876..eee636de4cd8 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -218,6 +218,7 @@
218 reg = <0x480c8000 0x2000>; 218 reg = <0x480c8000 0x2000>;
219 interrupts = <77>; 219 interrupts = <77>;
220 ti,hwmods = "mailbox"; 220 ti,hwmods = "mailbox";
221 #mbox-cells = <1>;
221 ti,mbox-num-users = <4>; 222 ti,mbox-num-users = <4>;
222 ti,mbox-num-fifos = <12>; 223 ti,mbox-num-fifos = <12>;
223 mbox_dsp: mbox_dsp { 224 mbox_dsp: mbox_dsp {
@@ -279,8 +280,11 @@
279 ti,spi-num-cs = <4>; 280 ti,spi-num-cs = <4>;
280 ti,hwmods = "mcspi1"; 281 ti,hwmods = "mcspi1";
281 dmas = <&edma 16 &edma 17 282 dmas = <&edma 16 &edma 17
282 &edma 18 &edma 19>; 283 &edma 18 &edma 19
283 dma-names = "tx0", "rx0", "tx1", "rx1"; 284 &edma 20 &edma 21
285 &edma 22 &edma 23>;
286 dma-names = "tx0", "rx0", "tx1", "rx1",
287 "tx2", "rx2", "tx3", "rx3";
284 }; 288 };
285 289
286 mmc1: mmc@48060000 { 290 mmc1: mmc@48060000 {
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 49a4f43e5ac2..1cc2e95ffc66 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -122,6 +122,12 @@
122 compatible = "auo,b133htn01"; 122 compatible = "auo,b133htn01";
123 power-supply = <&tps65090_fet6>; 123 power-supply = <&tps65090_fet6>;
124 backlight = <&backlight>; 124 backlight = <&backlight>;
125
126 port {
127 panel_in: endpoint {
128 remote-endpoint = <&dp_out>;
129 };
130 };
125 }; 131 };
126 132
127 mmc1_pwrseq: mmc1_pwrseq { 133 mmc1_pwrseq: mmc1_pwrseq {
@@ -148,7 +154,14 @@
148 samsung,link-rate = <0x0a>; 154 samsung,link-rate = <0x0a>;
149 samsung,lane-count = <2>; 155 samsung,lane-count = <2>;
150 samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>; 156 samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>;
151 panel = <&panel>; 157
158 ports {
159 port {
160 dp_out: endpoint {
161 remote-endpoint = <&panel_in>;
162 };
163 };
164 };
152}; 165};
153 166
154&fimd { 167&fimd {
diff --git a/arch/arm/boot/dts/vf610-colibri.dtsi b/arch/arm/boot/dts/vf610-colibri.dtsi
index 19fe045b8334..2d7eab755210 100644
--- a/arch/arm/boot/dts/vf610-colibri.dtsi
+++ b/arch/arm/boot/dts/vf610-colibri.dtsi
@@ -18,8 +18,3 @@
18 reg = <0x80000000 0x10000000>; 18 reg = <0x80000000 0x10000000>;
19 }; 19 };
20}; 20};
21
22&L2 {
23 arm,data-latency = <2 1 2>;
24 arm,tag-latency = <3 2 3>;
25};
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index 5f8eb1bd782b..58bc6e448be5 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -19,7 +19,7 @@
19 reg = <0x40006000 0x1000>; 19 reg = <0x40006000 0x1000>;
20 cache-unified; 20 cache-unified;
21 cache-level = <2>; 21 cache-level = <2>;
22 arm,data-latency = <1 1 1>; 22 arm,data-latency = <3 3 3>;
23 arm,tag-latency = <2 2 2>; 23 arm,tag-latency = <2 2 2>;
24 }; 24 };
25}; 25};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 0d5acc2cdc8e..3cd1b27f2697 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -178,8 +178,10 @@
178 compatible = "fsl,vf610-sai"; 178 compatible = "fsl,vf610-sai";
179 reg = <0x40031000 0x1000>; 179 reg = <0x40031000 0x1000>;
180 interrupts = <86 IRQ_TYPE_LEVEL_HIGH>; 180 interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
181 clocks = <&clks VF610_CLK_SAI2>; 181 clocks = <&clks VF610_CLK_SAI2>,
182 clock-names = "sai"; 182 <&clks VF610_CLK_SAI2_DIV>,
183 <&clks 0>, <&clks 0>;
184 clock-names = "bus", "mclk1", "mclk2", "mclk3";
183 dma-names = "tx", "rx"; 185 dma-names = "tx", "rx";
184 dmas = <&edma0 0 21>, 186 dmas = <&edma0 0 21>,
185 <&edma0 0 20>; 187 <&edma0 0 20>;
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 6607d976e07d..7da5503c0591 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -21,6 +21,7 @@
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23#include <linux/io.h> 23#include <linux/io.h>
24#include <asm/barrier.h>
24 25
25#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2 26#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
26#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm 27#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 8cc85a4ebec2..35c9db857ebe 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
510static inline unsigned long __must_check 510static inline unsigned long __must_check
511__copy_to_user(void __user *to, const void *from, unsigned long n) 511__copy_to_user(void __user *to, const void *from, unsigned long n)
512{ 512{
513#ifndef CONFIG_UACCESS_WITH_MEMCPY
513 unsigned int __ua_flags = uaccess_save_and_enable(); 514 unsigned int __ua_flags = uaccess_save_and_enable();
514 n = arm_copy_to_user(to, from, n); 515 n = arm_copy_to_user(to, from, n);
515 uaccess_restore(__ua_flags); 516 uaccess_restore(__ua_flags);
516 return n; 517 return n;
518#else
519 return arm_copy_to_user(to, from, n);
520#endif
517} 521}
518 522
519extern unsigned long __must_check 523extern unsigned long __must_check
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7a7c4cea5523..4adfb46e3ee9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
95{ 95{
96 unsigned long flags; 96 unsigned long flags;
97 char buf[64]; 97 char buf[64];
98#ifndef CONFIG_CPU_V7M
99 unsigned int domain;
100#ifdef CONFIG_CPU_SW_DOMAIN_PAN
101 /*
102 * Get the domain register for the parent context. In user
103 * mode, we don't save the DACR, so lets use what it should
104 * be. For other modes, we place it after the pt_regs struct.
105 */
106 if (user_mode(regs))
107 domain = DACR_UACCESS_ENABLE;
108 else
109 domain = *(unsigned int *)(regs + 1);
110#else
111 domain = get_domain();
112#endif
113#endif
98 114
99 show_regs_print_info(KERN_DEFAULT); 115 show_regs_print_info(KERN_DEFAULT);
100 116
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
123 139
124#ifndef CONFIG_CPU_V7M 140#ifndef CONFIG_CPU_V7M
125 { 141 {
126 unsigned int domain = get_domain();
127 const char *segment; 142 const char *segment;
128 143
129#ifdef CONFIG_CPU_SW_DOMAIN_PAN
130 /*
131 * Get the domain register for the parent context. In user
132 * mode, we don't save the DACR, so lets use what it should
133 * be. For other modes, we place it after the pt_regs struct.
134 */
135 if (user_mode(regs))
136 domain = DACR_UACCESS_ENABLE;
137 else
138 domain = *(unsigned int *)(regs + 1);
139#endif
140
141 if ((domain & domain_mask(DOMAIN_USER)) == 144 if ((domain & domain_mask(DOMAIN_USER)) ==
142 domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) 145 domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
143 segment = "none"; 146 segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
163 buf[0] = '\0'; 166 buf[0] = '\0';
164#ifdef CONFIG_CPU_CP15_MMU 167#ifdef CONFIG_CPU_CP15_MMU
165 { 168 {
166 unsigned int transbase, dac = get_domain(); 169 unsigned int transbase;
167 asm("mrc p15, 0, %0, c2, c0\n\t" 170 asm("mrc p15, 0, %0, c2, c0\n\t"
168 : "=r" (transbase)); 171 : "=r" (transbase));
169 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", 172 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
170 transbase, dac); 173 transbase, domain);
171 } 174 }
172#endif 175#endif
173 asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); 176 asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5b26e7efa9ea..c3fe769d7558 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -36,10 +36,10 @@
36 */ 36 */
37#define __user_swpX_asm(data, addr, res, temp, B) \ 37#define __user_swpX_asm(data, addr, res, temp, B) \
38 __asm__ __volatile__( \ 38 __asm__ __volatile__( \
39 " mov %2, %1\n" \ 39 "0: ldrex"B" %2, [%3]\n" \
40 "0: ldrex"B" %1, [%3]\n" \ 40 "1: strex"B" %0, %1, [%3]\n" \
41 "1: strex"B" %0, %2, [%3]\n" \
42 " cmp %0, #0\n" \ 41 " cmp %0, #0\n" \
42 " moveq %1, %2\n" \
43 " movne %0, %4\n" \ 43 " movne %0, %4\n" \
44 "2:\n" \ 44 "2:\n" \
45 " .section .text.fixup,\"ax\"\n" \ 45 " .section .text.fixup,\"ax\"\n" \
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index d72b90905132..588bbc288396 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
88static unsigned long noinline 88static unsigned long noinline
89__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) 89__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
90{ 90{
91 unsigned long ua_flags;
91 int atomic; 92 int atomic;
92 93
93 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 94 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
118 if (tocopy > n) 119 if (tocopy > n)
119 tocopy = n; 120 tocopy = n;
120 121
122 ua_flags = uaccess_save_and_enable();
121 memcpy((void *)to, from, tocopy); 123 memcpy((void *)to, from, tocopy);
124 uaccess_restore(ua_flags);
122 to += tocopy; 125 to += tocopy;
123 from += tocopy; 126 from += tocopy;
124 n -= tocopy; 127 n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
145 * With frame pointer disabled, tail call optimization kicks in 148 * With frame pointer disabled, tail call optimization kicks in
146 * as well making this test almost invisible. 149 * as well making this test almost invisible.
147 */ 150 */
148 if (n < 64) 151 if (n < 64) {
149 return __copy_to_user_std(to, from, n); 152 unsigned long ua_flags = uaccess_save_and_enable();
150 return __copy_to_user_memcpy(to, from, n); 153 n = __copy_to_user_std(to, from, n);
154 uaccess_restore(ua_flags);
155 } else {
156 n = __copy_to_user_memcpy(to, from, n);
157 }
158 return n;
151} 159}
152 160
153static unsigned long noinline 161static unsigned long noinline
154__clear_user_memset(void __user *addr, unsigned long n) 162__clear_user_memset(void __user *addr, unsigned long n)
155{ 163{
164 unsigned long ua_flags;
165
156 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 166 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
157 memset((void *)addr, 0, n); 167 memset((void *)addr, 0, n);
158 return 0; 168 return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
175 if (tocopy > n) 185 if (tocopy > n)
176 tocopy = n; 186 tocopy = n;
177 187
188 ua_flags = uaccess_save_and_enable();
178 memset((void *)addr, 0, tocopy); 189 memset((void *)addr, 0, tocopy);
190 uaccess_restore(ua_flags);
179 addr += tocopy; 191 addr += tocopy;
180 n -= tocopy; 192 n -= tocopy;
181 193
@@ -193,9 +205,14 @@ out:
193unsigned long arm_clear_user(void __user *addr, unsigned long n) 205unsigned long arm_clear_user(void __user *addr, unsigned long n)
194{ 206{
195 /* See rational for this in __copy_to_user() above. */ 207 /* See rational for this in __copy_to_user() above. */
196 if (n < 64) 208 if (n < 64) {
197 return __clear_user_std(addr, n); 209 unsigned long ua_flags = uaccess_save_and_enable();
198 return __clear_user_memset(addr, n); 210 n = __clear_user_std(addr, n);
211 uaccess_restore(ua_flags);
212 } else {
213 n = __clear_user_memset(addr, n);
214 }
215 return n;
199} 216}
200 217
201#if 0 218#if 0
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 92673006e55c..28656c2b54a0 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -4,7 +4,6 @@ menuconfig ARCH_AT91
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
5 select COMMON_CLK_AT91 5 select COMMON_CLK_AT91
6 select PINCTRL 6 select PINCTRL
7 select PINCTRL_AT91
8 select SOC_BUS 7 select SOC_BUS
9 8
10if ARCH_AT91 9if ARCH_AT91
@@ -17,6 +16,7 @@ config SOC_SAMA5D2
17 select HAVE_AT91_USB_CLK 16 select HAVE_AT91_USB_CLK
18 select HAVE_AT91_H32MX 17 select HAVE_AT91_H32MX
19 select HAVE_AT91_GENERATED_CLK 18 select HAVE_AT91_GENERATED_CLK
19 select PINCTRL_AT91PIO4
20 help 20 help
21 Select this if ou are using one of Atmel's SAMA5D2 family SoC. 21 Select this if ou are using one of Atmel's SAMA5D2 family SoC.
22 22
@@ -27,6 +27,7 @@ config SOC_SAMA5D3
27 select HAVE_AT91_UTMI 27 select HAVE_AT91_UTMI
28 select HAVE_AT91_SMD 28 select HAVE_AT91_SMD
29 select HAVE_AT91_USB_CLK 29 select HAVE_AT91_USB_CLK
30 select PINCTRL_AT91
30 help 31 help
31 Select this if you are using one of Atmel's SAMA5D3 family SoC. 32 Select this if you are using one of Atmel's SAMA5D3 family SoC.
32 This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36. 33 This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@@ -40,6 +41,7 @@ config SOC_SAMA5D4
40 select HAVE_AT91_SMD 41 select HAVE_AT91_SMD
41 select HAVE_AT91_USB_CLK 42 select HAVE_AT91_USB_CLK
42 select HAVE_AT91_H32MX 43 select HAVE_AT91_H32MX
44 select PINCTRL_AT91
43 help 45 help
44 Select this if you are using one of Atmel's SAMA5D4 family SoC. 46 Select this if you are using one of Atmel's SAMA5D4 family SoC.
45 47
@@ -50,6 +52,7 @@ config SOC_AT91RM9200
50 select CPU_ARM920T 52 select CPU_ARM920T
51 select HAVE_AT91_USB_CLK 53 select HAVE_AT91_USB_CLK
52 select MIGHT_HAVE_PCI 54 select MIGHT_HAVE_PCI
55 select PINCTRL_AT91
53 select SOC_SAM_V4_V5 56 select SOC_SAM_V4_V5
54 select SRAM if PM 57 select SRAM if PM
55 help 58 help
@@ -65,6 +68,7 @@ config SOC_AT91SAM9
65 select HAVE_AT91_UTMI 68 select HAVE_AT91_UTMI
66 select HAVE_FB_ATMEL 69 select HAVE_FB_ATMEL
67 select MEMORY 70 select MEMORY
71 select PINCTRL_AT91
68 select SOC_SAM_V4_V5 72 select SOC_SAM_V4_V5
69 select SRAM if PM 73 select SRAM if PM
70 help 74 help
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 80e277cfcc8b..23726fb31741 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -41,8 +41,10 @@
41 * implementation should be moved down into the pinctrl driver and get 41 * implementation should be moved down into the pinctrl driver and get
42 * called as part of the generic suspend/resume path. 42 * called as part of the generic suspend/resume path.
43 */ 43 */
44#ifdef CONFIG_PINCTRL_AT91
44extern void at91_pinctrl_gpio_suspend(void); 45extern void at91_pinctrl_gpio_suspend(void);
45extern void at91_pinctrl_gpio_resume(void); 46extern void at91_pinctrl_gpio_resume(void);
47#endif
46 48
47static struct { 49static struct {
48 unsigned long uhp_udp_mask; 50 unsigned long uhp_udp_mask;
@@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
151 153
152static int at91_pm_enter(suspend_state_t state) 154static int at91_pm_enter(suspend_state_t state)
153{ 155{
156#ifdef CONFIG_PINCTRL_AT91
154 at91_pinctrl_gpio_suspend(); 157 at91_pinctrl_gpio_suspend();
155 158#endif
156 switch (state) { 159 switch (state) {
157 /* 160 /*
158 * Suspend-to-RAM is like STANDBY plus slow clock mode, so 161 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
@@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
192error: 195error:
193 target_state = PM_SUSPEND_ON; 196 target_state = PM_SUSPEND_ON;
194 197
198#ifdef CONFIG_PINCTRL_AT91
195 at91_pinctrl_gpio_resume(); 199 at91_pinctrl_gpio_resume();
200#endif
196 return 0; 201 return 0;
197} 202}
198 203
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index de68938ee6aa..c21e41dad19c 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
748void exynos_sys_powerdown_conf(enum sys_powerdown mode) 748void exynos_sys_powerdown_conf(enum sys_powerdown mode)
749{ 749{
750 unsigned int i; 750 unsigned int i;
751 const struct exynos_pmu_data *pmu_data;
752
753 if (!pmu_context)
754 return;
751 755
752 const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data; 756 pmu_data = pmu_context->pmu_data;
753 757
754 if (pmu_data->powerdown_conf) 758 if (pmu_data->powerdown_conf)
755 pmu_data->powerdown_conf(mode); 759 pmu_data->powerdown_conf(mode);
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index b02439019963..7a0c13bf4269 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
143 writel(*vaddr++, bus_addr); 143 writel(*vaddr++, bus_addr);
144} 144}
145 145
146static inline unsigned char __indirect_readb(const volatile void __iomem *p) 146static inline u8 __indirect_readb(const volatile void __iomem *p)
147{ 147{
148 u32 addr = (u32)p; 148 u32 addr = (u32)p;
149 u32 n, byte_enables, data; 149 u32 n, byte_enables, data;
@@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
166 *vaddr++ = readb(bus_addr); 166 *vaddr++ = readb(bus_addr);
167} 167}
168 168
169static inline unsigned short __indirect_readw(const volatile void __iomem *p) 169static inline u16 __indirect_readw(const volatile void __iomem *p)
170{ 170{
171 u32 addr = (u32)p; 171 u32 addr = (u32)p;
172 u32 n, byte_enables, data; 172 u32 n, byte_enables, data;
@@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
189 *vaddr++ = readw(bus_addr); 189 *vaddr++ = readw(bus_addr);
190} 190}
191 191
192static inline unsigned long __indirect_readl(const volatile void __iomem *p) 192static inline u32 __indirect_readl(const volatile void __iomem *p)
193{ 193{
194 u32 addr = (__force u32)p; 194 u32 addr = (__force u32)p;
195 u32 data; 195 u32 data;
@@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
350 ((unsigned long)p <= (PIO_MASK + PIO_OFFSET))) 350 ((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
351 351
352#define ioread8(p) ioread8(p) 352#define ioread8(p) ioread8(p)
353static inline unsigned int ioread8(const void __iomem *addr) 353static inline u8 ioread8(const void __iomem *addr)
354{ 354{
355 unsigned long port = (unsigned long __force)addr; 355 unsigned long port = (unsigned long __force)addr;
356 if (__is_io_address(port)) 356 if (__is_io_address(port))
@@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
378} 378}
379 379
380#define ioread16(p) ioread16(p) 380#define ioread16(p) ioread16(p)
381static inline unsigned int ioread16(const void __iomem *addr) 381static inline u16 ioread16(const void __iomem *addr)
382{ 382{
383 unsigned long port = (unsigned long __force)addr; 383 unsigned long port = (unsigned long __force)addr;
384 if (__is_io_address(port)) 384 if (__is_io_address(port))
@@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
407} 407}
408 408
409#define ioread32(p) ioread32(p) 409#define ioread32(p) ioread32(p)
410static inline unsigned int ioread32(const void __iomem *addr) 410static inline u32 ioread32(const void __iomem *addr)
411{ 411{
412 unsigned long port = (unsigned long __force)addr; 412 unsigned long port = (unsigned long __force)addr;
413 if (__is_io_address(port)) 413 if (__is_io_address(port))
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 5076d3f334d2..4b4371db5799 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
121 select NEON if CPU_V7 121 select NEON if CPU_V7
122 select PM 122 select PM
123 select REGULATOR 123 select REGULATOR
124 select REGULATOR_FIXED_VOLTAGE
124 select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 125 select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
125 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 126 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
126 select VFP 127 select VFP
@@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
201 depends on ARCH_OMAP3 202 depends on ARCH_OMAP3
202 default y 203 default y
203 select OMAP_PACKAGE_CBB 204 select OMAP_PACKAGE_CBB
204 select REGULATOR_FIXED_VOLTAGE if REGULATOR
205 205
206config MACH_NOKIA_N810 206config MACH_NOKIA_N810
207 bool 207 bool
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 9a9c15bfcd34..7c0d5618be5e 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -889,6 +889,7 @@ static void __init e680_init(void)
889 889
890 pxa_set_keypad_info(&e680_keypad_platform_data); 890 pxa_set_keypad_info(&e680_keypad_platform_data);
891 891
892 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
892 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 893 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
893 platform_add_devices(ARRAY_AND_SIZE(e680_devices)); 894 platform_add_devices(ARRAY_AND_SIZE(e680_devices));
894} 895}
@@ -956,6 +957,7 @@ static void __init a1200_init(void)
956 957
957 pxa_set_keypad_info(&a1200_keypad_platform_data); 958 pxa_set_keypad_info(&a1200_keypad_platform_data);
958 959
960 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
959 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 961 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
960 platform_add_devices(ARRAY_AND_SIZE(a1200_devices)); 962 platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
961} 963}
@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
1148 platform_device_register(&a910_camera); 1150 platform_device_register(&a910_camera);
1149 } 1151 }
1150 1152
1153 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
1151 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 1154 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
1152 platform_add_devices(ARRAY_AND_SIZE(a910_devices)); 1155 platform_add_devices(ARRAY_AND_SIZE(a910_devices));
1153} 1156}
@@ -1215,6 +1218,7 @@ static void __init e6_init(void)
1215 1218
1216 pxa_set_keypad_info(&e6_keypad_platform_data); 1219 pxa_set_keypad_info(&e6_keypad_platform_data);
1217 1220
1221 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
1218 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 1222 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
1219 platform_add_devices(ARRAY_AND_SIZE(e6_devices)); 1223 platform_add_devices(ARRAY_AND_SIZE(e6_devices));
1220} 1224}
@@ -1256,6 +1260,7 @@ static void __init e2_init(void)
1256 1260
1257 pxa_set_keypad_info(&e2_keypad_platform_data); 1261 pxa_set_keypad_info(&e2_keypad_platform_data);
1258 1262
1263 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
1259 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 1264 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
1260 platform_add_devices(ARRAY_AND_SIZE(e2_devices)); 1265 platform_add_devices(ARRAY_AND_SIZE(e2_devices));
1261} 1266}
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
index a19460e6e7b0..b355fca6cc2e 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
@@ -20,7 +20,7 @@
20#include <plat/cpu.h> 20#include <plat/cpu.h>
21#include <plat/cpu-freq-core.h> 21#include <plat/cpu-freq-core.h>
22 22
23static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = { 23static struct cpufreq_frequency_table s3c2440_plls_12[] = {
24 { .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ 24 { .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */
25 { .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ 25 { .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */
26 { .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ 26 { .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
index 1191b2905625..be9a248b5ce9 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
@@ -20,7 +20,7 @@
20#include <plat/cpu.h> 20#include <plat/cpu.h>
21#include <plat/cpu-freq-core.h> 21#include <plat/cpu-freq-core.h>
22 22
23static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = { 23static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
24 { .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ 24 { .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */
25 { .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ 25 { .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */
26 { .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ 26 { .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 845769e41332..c8c8b9ed02e0 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
165 __flush_icache_all(); 165 __flush_icache_all();
166} 166}
167 167
168static int is_reserved_asid(u64 asid) 168static bool check_update_reserved_asid(u64 asid, u64 newasid)
169{ 169{
170 int cpu; 170 int cpu;
171 for_each_possible_cpu(cpu) 171 bool hit = false;
172 if (per_cpu(reserved_asids, cpu) == asid) 172
173 return 1; 173 /*
174 return 0; 174 * Iterate over the set of reserved ASIDs looking for a match.
175 * If we find one, then we can update our mm to use newasid
176 * (i.e. the same ASID in the current generation) but we can't
177 * exit the loop early, since we need to ensure that all copies
178 * of the old ASID are updated to reflect the mm. Failure to do
179 * so could result in us missing the reserved ASID in a future
180 * generation.
181 */
182 for_each_possible_cpu(cpu) {
183 if (per_cpu(reserved_asids, cpu) == asid) {
184 hit = true;
185 per_cpu(reserved_asids, cpu) = newasid;
186 }
187 }
188
189 return hit;
175} 190}
176 191
177static u64 new_context(struct mm_struct *mm, unsigned int cpu) 192static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
181 u64 generation = atomic64_read(&asid_generation); 196 u64 generation = atomic64_read(&asid_generation);
182 197
183 if (asid != 0) { 198 if (asid != 0) {
199 u64 newasid = generation | (asid & ~ASID_MASK);
200
184 /* 201 /*
185 * If our current ASID was active during a rollover, we 202 * If our current ASID was active during a rollover, we
186 * can continue to use it and this was just a false alarm. 203 * can continue to use it and this was just a false alarm.
187 */ 204 */
188 if (is_reserved_asid(asid)) 205 if (check_update_reserved_asid(asid, newasid))
189 return generation | (asid & ~ASID_MASK); 206 return newasid;
190 207
191 /* 208 /*
192 * We had a valid ASID in a previous life, so try to re-use 209 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
194 */ 211 */
195 asid &= ~ASID_MASK; 212 asid &= ~ASID_MASK;
196 if (!__test_and_set_bit(asid, asid_map)) 213 if (!__test_and_set_bit(asid, asid_map))
197 goto bump_gen; 214 return newasid;
198 } 215 }
199 216
200 /* 217 /*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
216 233
217 __set_bit(asid, asid_map); 234 __set_bit(asid, asid_map);
218 cur_idx = asid; 235 cur_idx = asid;
219
220bump_gen:
221 asid |= generation;
222 cpumask_clear(mm_cpumask(mm)); 236 cpumask_clear(mm_cpumask(mm));
223 return asid; 237 return asid | generation;
224} 238}
225 239
226void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) 240void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e62400e5fb99..534a60ae282e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1521 return -ENOMEM; 1521 return -ENOMEM;
1522 1522
1523 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 1523 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1524 phys_addr_t phys = sg_phys(s) & PAGE_MASK; 1524 phys_addr_t phys = page_to_phys(sg_page(s));
1525 unsigned int len = PAGE_ALIGN(s->offset + s->length); 1525 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1526 1526
1527 if (!is_coherent && 1527 if (!is_coherent &&
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8a63b4cdc0f2..7f8cd1b3557f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
22#include <linux/memblock.h> 22#include <linux/memblock.h>
23#include <linux/dma-contiguous.h> 23#include <linux/dma-contiguous.h>
24#include <linux/sizes.h> 24#include <linux/sizes.h>
25#include <linux/stop_machine.h>
25 26
26#include <asm/cp15.h> 27#include <asm/cp15.h>
27#include <asm/mach-types.h> 28#include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
627 * safe to be called with preemption disabled, as under stop_machine(). 628 * safe to be called with preemption disabled, as under stop_machine().
628 */ 629 */
629static inline void section_update(unsigned long addr, pmdval_t mask, 630static inline void section_update(unsigned long addr, pmdval_t mask,
630 pmdval_t prot) 631 pmdval_t prot, struct mm_struct *mm)
631{ 632{
632 struct mm_struct *mm;
633 pmd_t *pmd; 633 pmd_t *pmd;
634 634
635 mm = current->active_mm;
636 pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); 635 pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
637 636
638#ifdef CONFIG_ARM_LPAE 637#ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
656 return !!(get_cr() & CR_XP); 655 return !!(get_cr() & CR_XP);
657} 656}
658 657
659#define set_section_perms(perms, field) { \ 658void set_section_perms(struct section_perm *perms, int n, bool set,
660 size_t i; \ 659 struct mm_struct *mm)
661 unsigned long addr; \ 660{
662 \ 661 size_t i;
663 if (!arch_has_strict_perms()) \ 662 unsigned long addr;
664 return; \ 663
665 \ 664 if (!arch_has_strict_perms())
666 for (i = 0; i < ARRAY_SIZE(perms); i++) { \ 665 return;
667 if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \ 666
668 !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \ 667 for (i = 0; i < n; i++) {
669 pr_err("BUG: section %lx-%lx not aligned to %lx\n", \ 668 if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
670 perms[i].start, perms[i].end, \ 669 !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
671 SECTION_SIZE); \ 670 pr_err("BUG: section %lx-%lx not aligned to %lx\n",
672 continue; \ 671 perms[i].start, perms[i].end,
673 } \ 672 SECTION_SIZE);
674 \ 673 continue;
675 for (addr = perms[i].start; \ 674 }
676 addr < perms[i].end; \ 675
677 addr += SECTION_SIZE) \ 676 for (addr = perms[i].start;
678 section_update(addr, perms[i].mask, \ 677 addr < perms[i].end;
679 perms[i].field); \ 678 addr += SECTION_SIZE)
680 } \ 679 section_update(addr, perms[i].mask,
680 set ? perms[i].prot : perms[i].clear, mm);
681 }
682
681} 683}
682 684
683static inline void fix_kernmem_perms(void) 685static void update_sections_early(struct section_perm perms[], int n)
684{ 686{
685 set_section_perms(nx_perms, prot); 687 struct task_struct *t, *s;
688
689 read_lock(&tasklist_lock);
690 for_each_process(t) {
691 if (t->flags & PF_KTHREAD)
692 continue;
693 for_each_thread(t, s)
694 set_section_perms(perms, n, true, s->mm);
695 }
696 read_unlock(&tasklist_lock);
697 set_section_perms(perms, n, true, current->active_mm);
698 set_section_perms(perms, n, true, &init_mm);
699}
700
701int __fix_kernmem_perms(void *unused)
702{
703 update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
704 return 0;
705}
706
707void fix_kernmem_perms(void)
708{
709 stop_machine(__fix_kernmem_perms, NULL, NULL);
686} 710}
687 711
688#ifdef CONFIG_DEBUG_RODATA 712#ifdef CONFIG_DEBUG_RODATA
713int __mark_rodata_ro(void *unused)
714{
715 update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
716 return 0;
717}
718
689void mark_rodata_ro(void) 719void mark_rodata_ro(void)
690{ 720{
691 set_section_perms(ro_perms, prot); 721 stop_machine(__mark_rodata_ro, NULL, NULL);
692} 722}
693 723
694void set_kernel_text_rw(void) 724void set_kernel_text_rw(void)
695{ 725{
696 set_section_perms(ro_perms, clear); 726 set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
727 current->active_mm);
697} 728}
698 729
699void set_kernel_text_ro(void) 730void set_kernel_text_ro(void)
700{ 731{
701 set_section_perms(ro_perms, prot); 732 set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
733 current->active_mm);
702} 734}
703#endif /* CONFIG_DEBUG_RODATA */ 735#endif /* CONFIG_DEBUG_RODATA */
704 736
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index de2b246fed38..8e1ea433c3f1 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
95.equ cpu_v7_suspend_size, 4 * 9 95.equ cpu_v7_suspend_size, 4 * 9
96#ifdef CONFIG_ARM_CPU_SUSPEND 96#ifdef CONFIG_ARM_CPU_SUSPEND
97ENTRY(cpu_v7_do_suspend) 97ENTRY(cpu_v7_do_suspend)
98 stmfd sp!, {r4 - r10, lr} 98 stmfd sp!, {r4 - r11, lr}
99 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 99 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
100 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID 100 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
101 stmia r0!, {r4 - r5} 101 stmia r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
112 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register 112 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
113 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 113 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
114 stmia r0, {r5 - r11} 114 stmia r0, {r5 - r11}
115 ldmfd sp!, {r4 - r10, pc} 115 ldmfd sp!, {r4 - r11, pc}
116ENDPROC(cpu_v7_do_suspend) 116ENDPROC(cpu_v7_do_suspend)
117 117
118ENTRY(cpu_v7_do_resume) 118ENTRY(cpu_v7_do_resume)
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index e81cd48d6245..925552e7b4f3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -269,6 +269,7 @@
269 clock-frequency = <0>; /* Updated by bootloader */ 269 clock-frequency = <0>; /* Updated by bootloader */
270 voltage-ranges = <1800 1800 3300 3300>; 270 voltage-ranges = <1800 1800 3300 3300>;
271 sdhci,auto-cmd12; 271 sdhci,auto-cmd12;
272 little-endian;
272 bus-width = <4>; 273 bus-width = <4>;
273 }; 274 };
274 275
@@ -277,6 +278,7 @@
277 reg = <0x0 0x2300000 0x0 0x10000>; 278 reg = <0x0 0x2300000 0x0 0x10000>;
278 interrupts = <0 36 0x4>; /* Level high type */ 279 interrupts = <0 36 0x4>; /* Level high type */
279 gpio-controller; 280 gpio-controller;
281 little-endian;
280 #gpio-cells = <2>; 282 #gpio-cells = <2>;
281 interrupt-controller; 283 interrupt-controller;
282 #interrupt-cells = <2>; 284 #interrupt-cells = <2>;
@@ -287,6 +289,7 @@
287 reg = <0x0 0x2310000 0x0 0x10000>; 289 reg = <0x0 0x2310000 0x0 0x10000>;
288 interrupts = <0 36 0x4>; /* Level high type */ 290 interrupts = <0 36 0x4>; /* Level high type */
289 gpio-controller; 291 gpio-controller;
292 little-endian;
290 #gpio-cells = <2>; 293 #gpio-cells = <2>;
291 interrupt-controller; 294 interrupt-controller;
292 #interrupt-cells = <2>; 295 #interrupt-cells = <2>;
@@ -297,6 +300,7 @@
297 reg = <0x0 0x2320000 0x0 0x10000>; 300 reg = <0x0 0x2320000 0x0 0x10000>;
298 interrupts = <0 37 0x4>; /* Level high type */ 301 interrupts = <0 37 0x4>; /* Level high type */
299 gpio-controller; 302 gpio-controller;
303 little-endian;
300 #gpio-cells = <2>; 304 #gpio-cells = <2>;
301 interrupt-controller; 305 interrupt-controller;
302 #interrupt-cells = <2>; 306 #interrupt-cells = <2>;
@@ -307,6 +311,7 @@
307 reg = <0x0 0x2330000 0x0 0x10000>; 311 reg = <0x0 0x2330000 0x0 0x10000>;
308 interrupts = <0 37 0x4>; /* Level high type */ 312 interrupts = <0 37 0x4>; /* Level high type */
309 gpio-controller; 313 gpio-controller;
314 little-endian;
310 #gpio-cells = <2>; 315 #gpio-cells = <2>;
311 interrupt-controller; 316 interrupt-controller;
312 #interrupt-cells = <2>; 317 #interrupt-cells = <2>;
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 030cdcb46c6b..2731d3b25ed2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -77,6 +77,7 @@
77#ifndef __ASSEMBLY__ 77#ifndef __ASSEMBLY__
78 78
79#include <linux/stringify.h> 79#include <linux/stringify.h>
80#include <asm/barrier.h>
80 81
81/* 82/*
82 * Low-level accessors 83 * Low-level accessors
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7e074f93f383..63f52b55defe 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
276 * hardware updates of the pte (ptep_set_access_flags safely changes 276 * hardware updates of the pte (ptep_set_access_flags safely changes
277 * valid ptes without going through an invalid entry). 277 * valid ptes without going through an invalid entry).
278 */ 278 */
279 if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && 279 if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
280 pte_valid(*ptep)) { 280 pte_valid(*ptep) && pte_valid(pte)) {
281 BUG_ON(!pte_young(pte)); 281 VM_WARN_ONCE(!pte_young(pte),
282 BUG_ON(pte_write(*ptep) && !pte_dirty(pte)); 282 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
283 __func__, pte_val(*ptep), pte_val(pte));
284 VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
285 "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
286 __func__, pte_val(*ptep), pte_val(pte));
283 } 287 }
284 288
285 set_pte(ptep, pte); 289 set_pte(ptep, pte);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1ee2c3937d4e..71426a78db12 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/cache.h>
8#include <asm/kernel-pgtable.h> 9#include <asm/kernel-pgtable.h>
9#include <asm/thread_info.h> 10#include <asm/thread_info.h>
10#include <asm/memory.h> 11#include <asm/memory.h>
@@ -140,7 +141,7 @@ SECTIONS
140 ARM_EXIT_KEEP(EXIT_DATA) 141 ARM_EXIT_KEEP(EXIT_DATA)
141 } 142 }
142 143
143 PERCPU_SECTION(64) 144 PERCPU_SECTION(L1_CACHE_BYTES)
144 145
145 . = ALIGN(PAGE_SIZE); 146 . = ALIGN(PAGE_SIZE);
146 __init_end = .; 147 __init_end = .;
@@ -158,7 +159,7 @@ SECTIONS
158 . = ALIGN(PAGE_SIZE); 159 . = ALIGN(PAGE_SIZE);
159 _data = .; 160 _data = .;
160 _sdata = .; 161 _sdata = .;
161 RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) 162 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
162 PECOFF_EDATA_PADDING 163 PECOFF_EDATA_PADDING
163 _edata = .; 164 _edata = .;
164 165
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 1e9c8b0bf486..170d786807c4 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -14,7 +14,7 @@
14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
15 * Copyright (C) 2009 Jaswinder Singh Rajput 15 * Copyright (C) 2009 Jaswinder Singh Rajput
16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
19 * 19 *
20 * ppc: 20 * ppc:
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index db73390568c8..74c132d901bd 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
11 11
12 12
13 13
14#define NR_syscalls 322 /* length of syscall table */ 14#define NR_syscalls 323 /* length of syscall table */
15 15
16/* 16/*
17 * The following defines stop scripts/checksyscalls.sh from complaining about 17 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 9038726e7d26..762edce7572e 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -335,5 +335,6 @@
335#define __NR_userfaultfd 1343 335#define __NR_userfaultfd 1343
336#define __NR_membarrier 1344 336#define __NR_membarrier 1344
337#define __NR_kcmp 1345 337#define __NR_kcmp 1345
338#define __NR_mlock2 1346
338 339
339#endif /* _UAPI_ASM_IA64_UNISTD_H */ 340#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index dcd97f84d065..534a74acb849 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1771,5 +1771,6 @@ sys_call_table:
1771 data8 sys_userfaultfd 1771 data8 sys_userfaultfd
1772 data8 sys_membarrier 1772 data8 sys_membarrier
1773 data8 sys_kcmp // 1345 1773 data8 sys_kcmp // 1345
1774 data8 sys_mlock2
1774 1775
1775 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1776 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index c89da6312954..bf4dec229437 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
61 /* FIXME this part of code is untested */ 61 /* FIXME this part of code is untested */
62 for_each_sg(sgl, sg, nents, i) { 62 for_each_sg(sgl, sg, nents, i) {
63 sg->dma_address = sg_phys(sg); 63 sg->dma_address = sg_phys(sg);
64 __dma_sync(sg_phys(sg), sg->length, direction); 64 __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
65 sg->length, direction);
65 } 66 }
66 67
67 return nents; 68 return nents;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index d8117be729a2..730d394ce5f0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
145 145
146 gfp = massage_gfp_flags(dev, gfp); 146 gfp = massage_gfp_flags(dev, gfp);
147 147
148 if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) 148 if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
149 page = dma_alloc_from_contiguous(dev, 149 page = dma_alloc_from_contiguous(dev,
150 count, get_order(size)); 150 count, get_order(size));
151 if (!page) 151 if (!page)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index d8534f95915a..291cee28ccb6 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
372 */ 372 */
373#ifdef CONFIG_HUGETLB_PAGE 373#ifdef CONFIG_HUGETLB_PAGE
374#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE) 374#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
375#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE)) 375#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
376 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
376#else 377#else
377#define pte_huge(pte) (0) 378#define pte_huge(pte) (0)
378#define pte_mkhuge(pte) (pte) 379#define pte_mkhuge(pte) (pte)
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 33170384d3ac..35bdccbb2036 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -360,8 +360,9 @@
360#define __NR_execveat (__NR_Linux + 342) 360#define __NR_execveat (__NR_Linux + 342)
361#define __NR_membarrier (__NR_Linux + 343) 361#define __NR_membarrier (__NR_Linux + 343)
362#define __NR_userfaultfd (__NR_Linux + 344) 362#define __NR_userfaultfd (__NR_Linux + 344)
363#define __NR_mlock2 (__NR_Linux + 345)
363 364
364#define __NR_Linux_syscalls (__NR_userfaultfd + 1) 365#define __NR_Linux_syscalls (__NR_mlock2 + 1)
365 366
366 367
367#define __IGNORE_select /* newselect */ 368#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 64f2764a8cef..c99f3dde455c 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
171} 171}
172 172
173 173
174void __init pcibios_init_bus(struct pci_bus *bus)
175{
176 struct pci_dev *dev = bus->self;
177 unsigned short bridge_ctl;
178
179 /* We deal only with pci controllers and pci-pci bridges. */
180 if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
181 return;
182
183 /* PCI-PCI bridge - set the cache line and default latency
184 (32) for primary and secondary buses. */
185 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
186
187 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
188 bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
189 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
190}
191
192/* 174/*
193 * pcibios align resources() is called every time generic PCI code 175 * pcibios align resources() is called every time generic PCI code
194 * wants to generate a new address. The process of looking for 176 * wants to generate a new address. The process of looking for
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 78c3ef8c348d..d4ffcfbc9885 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -440,6 +440,7 @@
440 ENTRY_COMP(execveat) 440 ENTRY_COMP(execveat)
441 ENTRY_SAME(membarrier) 441 ENTRY_SAME(membarrier)
442 ENTRY_SAME(userfaultfd) 442 ENTRY_SAME(userfaultfd)
443 ENTRY_SAME(mlock2) /* 345 */
443 444
444 445
445.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) 446.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index 631ede72e226..68f0ed7626bd 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -227,23 +227,15 @@
227 reg = <0x520 0x20>; 227 reg = <0x520 0x20>;
228 228
229 phy0: ethernet-phy@1f { 229 phy0: ethernet-phy@1f {
230 interrupt-parent = <&mpic>;
231 interrupts = <10 1>;
232 reg = <0x1f>; 230 reg = <0x1f>;
233 }; 231 };
234 phy1: ethernet-phy@0 { 232 phy1: ethernet-phy@0 {
235 interrupt-parent = <&mpic>;
236 interrupts = <10 1>;
237 reg = <0>; 233 reg = <0>;
238 }; 234 };
239 phy2: ethernet-phy@1 { 235 phy2: ethernet-phy@1 {
240 interrupt-parent = <&mpic>;
241 interrupts = <10 1>;
242 reg = <1>; 236 reg = <1>;
243 }; 237 };
244 phy3: ethernet-phy@2 { 238 phy3: ethernet-phy@2 {
245 interrupt-parent = <&mpic>;
246 interrupts = <10 1>;
247 reg = <2>; 239 reg = <2>;
248 }; 240 };
249 tbi0: tbi-phy@11 { 241 tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index f2b0b1b0c72a..5654ece02c0d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -370,16 +370,16 @@ COMPAT_SYS(execveat)
370PPC64ONLY(switch_endian) 370PPC64ONLY(switch_endian)
371SYSCALL_SPU(userfaultfd) 371SYSCALL_SPU(userfaultfd)
372SYSCALL_SPU(membarrier) 372SYSCALL_SPU(membarrier)
373SYSCALL(semop) 373SYSCALL(ni_syscall)
374SYSCALL(semget) 374SYSCALL(ni_syscall)
375COMPAT_SYS(semctl) 375SYSCALL(ni_syscall)
376COMPAT_SYS(semtimedop) 376SYSCALL(ni_syscall)
377COMPAT_SYS(msgsnd) 377SYSCALL(ni_syscall)
378COMPAT_SYS(msgrcv) 378SYSCALL(ni_syscall)
379SYSCALL(msgget) 379SYSCALL(ni_syscall)
380COMPAT_SYS(msgctl) 380SYSCALL(ni_syscall)
381COMPAT_SYS(shmat) 381SYSCALL(ni_syscall)
382SYSCALL(shmdt) 382SYSCALL(ni_syscall)
383SYSCALL(shmget) 383SYSCALL(ni_syscall)
384COMPAT_SYS(shmctl) 384SYSCALL(ni_syscall)
385SYSCALL(mlock2) 385SYSCALL(mlock2)
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 1effea5193d6..12a05652377a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -388,18 +388,6 @@
388#define __NR_switch_endian 363 388#define __NR_switch_endian 363
389#define __NR_userfaultfd 364 389#define __NR_userfaultfd 364
390#define __NR_membarrier 365 390#define __NR_membarrier 365
391#define __NR_semop 366
392#define __NR_semget 367
393#define __NR_semctl 368
394#define __NR_semtimedop 369
395#define __NR_msgsnd 370
396#define __NR_msgrcv 371
397#define __NR_msgget 372
398#define __NR_msgctl 373
399#define __NR_shmat 374
400#define __NR_shmdt 375
401#define __NR_shmget 376
402#define __NR_shmctl 377
403#define __NR_mlock2 378 391#define __NR_mlock2 378
404 392
405#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 393#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 80dfe8965df9..8d14feb40f12 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
590 eeh_ops->configure_bridge(pe); 590 eeh_ops->configure_bridge(pe);
591 eeh_pe_restore_bars(pe); 591 eeh_pe_restore_bars(pe);
592 592
593 /* 593 /* Clear frozen state */
594 * If it's PHB PE, the frozen state on all available PEs should have 594 rc = eeh_clear_pe_frozen_state(pe, false);
595 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its 595 if (rc)
596 * child PEs because they might be in frozen state. 596 return rc;
597 */
598 if (!(pe->type & EEH_PE_PHB)) {
599 rc = eeh_clear_pe_frozen_state(pe, false);
600 if (rc)
601 return rc;
602 }
603 597
604 /* Give the system 5 seconds to finish running the user-space 598 /* Give the system 5 seconds to finish running the user-space
605 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, 599 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 54b45b73195f..a7352b59e6f9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
224 224
225static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) 225static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
226{ 226{
227 /*
228 * Check for illegal transactional state bit combination
229 * and if we find it, force the TS field to a safe state.
230 */
231 if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
232 msr &= ~MSR_TS_MASK;
227 vcpu->arch.shregs.msr = msr; 233 vcpu->arch.shregs.msr = msr;
228 kvmppc_end_cede(vcpu); 234 kvmppc_end_cede(vcpu);
229} 235}
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 6ccfb6c1c707..e505223b4ec5 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
43static unsigned int *opal_irqs; 43static unsigned int *opal_irqs;
44 44
45static void opal_handle_irq_work(struct irq_work *work); 45static void opal_handle_irq_work(struct irq_work *work);
46static __be64 last_outstanding_events; 46static u64 last_outstanding_events;
47static struct irq_work opal_event_irq_work = { 47static struct irq_work opal_event_irq_work = {
48 .func = opal_handle_irq_work, 48 .func = opal_handle_irq_work,
49}; 49};
50 50
51void opal_handle_events(uint64_t events)
52{
53 int virq, hwirq = 0;
54 u64 mask = opal_event_irqchip.mask;
55
56 if (!in_irq() && (events & mask)) {
57 last_outstanding_events = events;
58 irq_work_queue(&opal_event_irq_work);
59 return;
60 }
61
62 while (events & mask) {
63 hwirq = fls64(events) - 1;
64 if (BIT_ULL(hwirq) & mask) {
65 virq = irq_find_mapping(opal_event_irqchip.domain,
66 hwirq);
67 if (virq)
68 generic_handle_irq(virq);
69 }
70 events &= ~BIT_ULL(hwirq);
71 }
72}
73
51static void opal_event_mask(struct irq_data *d) 74static void opal_event_mask(struct irq_data *d)
52{ 75{
53 clear_bit(d->hwirq, &opal_event_irqchip.mask); 76 clear_bit(d->hwirq, &opal_event_irqchip.mask);
@@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)
55 78
56static void opal_event_unmask(struct irq_data *d) 79static void opal_event_unmask(struct irq_data *d)
57{ 80{
81 __be64 events;
82
58 set_bit(d->hwirq, &opal_event_irqchip.mask); 83 set_bit(d->hwirq, &opal_event_irqchip.mask);
59 84
60 opal_poll_events(&last_outstanding_events); 85 opal_poll_events(&events);
86 last_outstanding_events = be64_to_cpu(events);
87
88 /*
89 * We can't just handle the events now with opal_handle_events().
90 * If we did we would deadlock when opal_event_unmask() is called from
91 * handle_level_irq() with the irq descriptor lock held, because
92 * calling opal_handle_events() would call generic_handle_irq() and
93 * then handle_level_irq() which would try to take the descriptor lock
94 * again. Instead queue the events for later.
95 */
61 if (last_outstanding_events & opal_event_irqchip.mask) 96 if (last_outstanding_events & opal_event_irqchip.mask)
62 /* Need to retrigger the interrupt */ 97 /* Need to retrigger the interrupt */
63 irq_work_queue(&opal_event_irq_work); 98 irq_work_queue(&opal_event_irq_work);
@@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
96 return 0; 131 return 0;
97} 132}
98 133
99void opal_handle_events(uint64_t events)
100{
101 int virq, hwirq = 0;
102 u64 mask = opal_event_irqchip.mask;
103
104 if (!in_irq() && (events & mask)) {
105 last_outstanding_events = events;
106 irq_work_queue(&opal_event_irq_work);
107 return;
108 }
109
110 while (events & mask) {
111 hwirq = fls64(events) - 1;
112 if (BIT_ULL(hwirq) & mask) {
113 virq = irq_find_mapping(opal_event_irqchip.domain,
114 hwirq);
115 if (virq)
116 generic_handle_irq(virq);
117 }
118 events &= ~BIT_ULL(hwirq);
119 }
120}
121
122static irqreturn_t opal_interrupt(int irq, void *data) 134static irqreturn_t opal_interrupt(int irq, void *data)
123{ 135{
124 __be64 events; 136 __be64 events;
@@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
131 143
132static void opal_handle_irq_work(struct irq_work *work) 144static void opal_handle_irq_work(struct irq_work *work)
133{ 145{
134 opal_handle_events(be64_to_cpu(last_outstanding_events)); 146 opal_handle_events(last_outstanding_events);
135} 147}
136 148
137static int opal_event_match(struct irq_domain *h, struct device_node *node, 149static int opal_event_match(struct irq_domain *h, struct device_node *node,
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 4296d55e88f3..57cffb80bc36 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -278,7 +278,7 @@ static void opal_handle_message(void)
278 278
279 /* Sanity check */ 279 /* Sanity check */
280 if (type >= OPAL_MSG_TYPE_MAX) { 280 if (type >= OPAL_MSG_TYPE_MAX) {
281 pr_warning("%s: Unknown message type: %u\n", __func__, type); 281 pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
282 return; 282 return;
283 } 283 }
284 opal_message_do_notify(type, (void *)&msg); 284 opal_message_do_notify(type, (void *)&msg);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 8140d10c6785..6e72961608f0 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1920 } 1920 }
1921 if (separator) 1921 if (separator)
1922 ptr += sprintf(ptr, "%c", separator); 1922 ptr += sprintf(ptr, "%c", separator);
1923 /*
1924 * Use four '%' characters below because of the
1925 * following two conversions:
1926 *
1927 * 1) sprintf: %%%%r -> %%r
1928 * 2) printk : %%r -> %r
1929 */
1923 if (operand->flags & OPERAND_GPR) 1930 if (operand->flags & OPERAND_GPR)
1924 ptr += sprintf(ptr, "%%r%i", value); 1931 ptr += sprintf(ptr, "%%%%r%i", value);
1925 else if (operand->flags & OPERAND_FPR) 1932 else if (operand->flags & OPERAND_FPR)
1926 ptr += sprintf(ptr, "%%f%i", value); 1933 ptr += sprintf(ptr, "%%%%f%i", value);
1927 else if (operand->flags & OPERAND_AR) 1934 else if (operand->flags & OPERAND_AR)
1928 ptr += sprintf(ptr, "%%a%i", value); 1935 ptr += sprintf(ptr, "%%%%a%i", value);
1929 else if (operand->flags & OPERAND_CR) 1936 else if (operand->flags & OPERAND_CR)
1930 ptr += sprintf(ptr, "%%c%i", value); 1937 ptr += sprintf(ptr, "%%%%c%i", value);
1931 else if (operand->flags & OPERAND_VR) 1938 else if (operand->flags & OPERAND_VR)
1932 ptr += sprintf(ptr, "%%v%i", value); 1939 ptr += sprintf(ptr, "%%%%v%i", value);
1933 else if (operand->flags & OPERAND_PCREL) 1940 else if (operand->flags & OPERAND_PCREL)
1934 ptr += sprintf(ptr, "%lx", (signed int) value 1941 ptr += sprintf(ptr, "%lx", (signed int) value
1935 + addr); 1942 + addr);
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index e6820c86e8c7..47ebd5b5ed55 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -278,7 +278,7 @@
278#define __NR_fsetxattr 256 278#define __NR_fsetxattr 256
279#define __NR_getxattr 257 279#define __NR_getxattr 257
280#define __NR_lgetxattr 258 280#define __NR_lgetxattr 258
281#define __NR_fgetxattr 269 281#define __NR_fgetxattr 259
282#define __NR_listxattr 260 282#define __NR_listxattr 260
283#define __NR_llistxattr 261 283#define __NR_llistxattr 261
284#define __NR_flistxattr 262 284#define __NR_flistxattr 262
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7cfd7f153966..4dca18347ee9 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -10,7 +10,7 @@
10 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 10 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
11 * Copyright (C) 2009 Jaswinder Singh Rajput 11 * Copyright (C) 2009 Jaswinder Singh Rajput
12 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 12 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
13 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 13 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
14 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 14 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
15 * 15 *
16 * ppc: 16 * ppc:
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b0da5aedb336..3091267c5cc3 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -9,7 +9,7 @@
9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
10 * Copyright (C) 2009 Jaswinder Singh Rajput 10 * Copyright (C) 2009 Jaswinder Singh Rajput
11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
13 */ 13 */
14 14
15#include <linux/perf_event.h> 15#include <linux/perf_event.h>
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index bb509cee3b59..8767060d70fb 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -21,7 +21,7 @@
21 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 21 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
22 * Copyright (C) 2009 Jaswinder Singh Rajput 22 * Copyright (C) 2009 Jaswinder Singh Rajput
23 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 23 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
24 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 24 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
25 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 25 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
26 * Copyright (C) 2009 Google, Inc., Stephane Eranian 26 * Copyright (C) 2009 Google, Inc., Stephane Eranian
27 */ 27 */
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 25ed4098640e..e3abe6f3156d 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
131# The wrappers will select whether using "malloc" or the kernel allocator. 131# The wrappers will select whether using "malloc" or the kernel allocator.
132LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc 132LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
133 133
134LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt 134LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
135 135
136# Used by link-vmlinux.sh which has special support for um link 136# Used by link-vmlinux.sh which has special support for um link
137export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) 137export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
diff --git a/arch/um/drivers/net_user.c b/arch/um/drivers/net_user.c
index e697a4136707..e9f8445861dc 100644
--- a/arch/um/drivers/net_user.c
+++ b/arch/um/drivers/net_user.c
@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
249 249
250char *split_if_spec(char *str, ...) 250char *split_if_spec(char *str, ...)
251{ 251{
252 char **arg, *end; 252 char **arg, *end, *ret = NULL;
253 va_list ap; 253 va_list ap;
254 254
255 va_start(ap, str); 255 va_start(ap, str);
256 while ((arg = va_arg(ap, char **)) != NULL) { 256 while ((arg = va_arg(ap, char **)) != NULL) {
257 if (*str == '\0') 257 if (*str == '\0')
258 return NULL; 258 goto out;
259 end = strchr(str, ','); 259 end = strchr(str, ',');
260 if (end != str) 260 if (end != str)
261 *arg = str; 261 *arg = str;
262 if (end == NULL) 262 if (end == NULL)
263 return NULL; 263 goto out;
264 *end++ = '\0'; 264 *end++ = '\0';
265 str = end; 265 str = end;
266 } 266 }
267 ret = str;
268out:
267 va_end(ap); 269 va_end(ap);
268 return str; 270 return ret;
269} 271}
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 57acbd67d85d..fc8be0e3a4ff 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
69 struct ksignal ksig; 69 struct ksignal ksig;
70 int handled_sig = 0; 70 int handled_sig = 0;
71 71
72 while (get_signal(&ksig)) { 72 if (get_signal(&ksig)) {
73 handled_sig = 1; 73 handled_sig = 1;
74 /* Whee! Actually deliver the signal. */ 74 /* Whee! Actually deliver the signal. */
75 handle_signal(&ksig, regs); 75 handle_signal(&ksig, regs);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4562cf070c27..2bf79d7c97df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -5,7 +5,7 @@
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput 6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian 10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
11 * 11 *
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 499f533dd3cc..d0e35ebb2adb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -5,7 +5,7 @@
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput 6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian 10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
11 * 11 *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
387/* Check flags and event code/umask, and set the HSW N/A flag */ 387/* Check flags and event code/umask, and set the HSW N/A flag */
388#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \ 388#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
389 __EVENT_CONSTRAINT(code, n, \ 389 __EVENT_CONSTRAINT(code, n, \
390 INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \ 390 INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
391 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW) 391 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
392 392
393 393
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
627 u64 lbr_from[MAX_LBR_ENTRIES]; 627 u64 lbr_from[MAX_LBR_ENTRIES];
628 u64 lbr_to[MAX_LBR_ENTRIES]; 628 u64 lbr_to[MAX_LBR_ENTRIES];
629 u64 lbr_info[MAX_LBR_ENTRIES]; 629 u64 lbr_info[MAX_LBR_ENTRIES];
630 int tos;
630 int lbr_callstack_users; 631 int lbr_callstack_users;
631 int lbr_stack_state; 632 int lbr_stack_state;
632}; 633};
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f63360be2238..e2a430021e46 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
232 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 232 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
233 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 233 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
234 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 234 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
235 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */ 235 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
236 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 236 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
237 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 237 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
238 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 238 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
298static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event) 298static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
299{ 299{
300 if (event->attach_state & PERF_ATTACH_TASK) 300 if (event->attach_state & PERF_ATTACH_TASK)
301 return perf_cgroup_from_task(event->hw.target); 301 return perf_cgroup_from_task(event->hw.target, event->ctx);
302 302
303 return event->cgrp; 303 return event->cgrp;
304} 304}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
239 } 239 }
240 240
241 mask = x86_pmu.lbr_nr - 1; 241 mask = x86_pmu.lbr_nr - 1;
242 tos = intel_pmu_lbr_tos(); 242 tos = task_ctx->tos;
243 for (i = 0; i < tos; i++) { 243 for (i = 0; i < tos; i++) {
244 lbr_idx = (tos - i) & mask; 244 lbr_idx = (tos - i) & mask;
245 wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); 245 wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
247 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) 247 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
248 wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); 248 wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
249 } 249 }
250 wrmsrl(x86_pmu.lbr_tos, tos);
250 task_ctx->lbr_stack_state = LBR_NONE; 251 task_ctx->lbr_stack_state = LBR_NONE;
251} 252}
252 253
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
270 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) 271 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
271 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); 272 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
272 } 273 }
274 task_ctx->tos = tos;
273 task_ctx->lbr_stack_state = LBR_VALID; 275 task_ctx->lbr_stack_state = LBR_VALID;
274} 276}
275 277
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index dc5fa6a1e8d6..3512ba607361 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * x86 specific code for irq_work 2 * x86 specific code for irq_work
3 * 3 *
4 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
5 */ 5 */
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 06332cb7e7d1..3f5c48ddba45 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
38 return best && (best->ecx & bit(X86_FEATURE_XSAVE)); 38 return best && (best->ecx & bit(X86_FEATURE_XSAVE));
39} 39}
40 40
41static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
42{
43 struct kvm_cpuid_entry2 *best;
44
45 best = kvm_find_cpuid_entry(vcpu, 1, 0);
46 return best && (best->edx & bit(X86_FEATURE_MTRR));
47}
48
41static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) 49static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
42{ 50{
43 struct kvm_cpuid_entry2 *best; 51 struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 9e8bf13572e6..3f8c732117ec 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121} 121}
122 122
123static u8 mtrr_disabled_type(void) 123static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
124{ 124{
125 /* 125 /*
126 * Intel SDM 11.11.2.2: all MTRRs are disabled when 126 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC 127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128 * memory type is applied to all of physical memory. 128 * memory type is applied to all of physical memory.
129 *
130 * However, virtual machines can be run with CPUID such that
131 * there are no MTRRs. In that case, the firmware will never
132 * enable MTRRs and it is obviously undesirable to run the
133 * guest entirely with UC memory and we use WB.
129 */ 134 */
130 return MTRR_TYPE_UNCACHABLE; 135 if (guest_cpuid_has_mtrr(vcpu))
136 return MTRR_TYPE_UNCACHABLE;
137 else
138 return MTRR_TYPE_WRBACK;
131} 139}
132 140
133/* 141/*
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
267 275
268 for (seg = 0; seg < seg_num; seg++) { 276 for (seg = 0; seg < seg_num; seg++) {
269 mtrr_seg = &fixed_seg_table[seg]; 277 mtrr_seg = &fixed_seg_table[seg];
270 if (mtrr_seg->start >= addr && addr < mtrr_seg->end) 278 if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
271 return seg; 279 return seg;
272 } 280 }
273 281
@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
300 *start = range->base & PAGE_MASK; 308 *start = range->base & PAGE_MASK;
301 309
302 mask = range->mask & PAGE_MASK; 310 mask = range->mask & PAGE_MASK;
303 mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
304 311
305 /* This cannot overflow because writing to the reserved bits of 312 /* This cannot overflow because writing to the reserved bits of
306 * variable MTRRs causes a #GP. 313 * variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
356 if (var_mtrr_range_is_valid(cur)) 363 if (var_mtrr_range_is_valid(cur))
357 list_del(&mtrr_state->var_ranges[index].node); 364 list_del(&mtrr_state->var_ranges[index].node);
358 365
366 /* Extend the mask with all 1 bits to the left, since those
367 * bits must implicitly be 0. The bits are then cleared
368 * when reading them.
369 */
359 if (!is_mtrr_mask) 370 if (!is_mtrr_mask)
360 cur->base = data; 371 cur->base = data;
361 else 372 else
362 cur->mask = data; 373 cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
363 374
364 /* add it to the list if it's enabled. */ 375 /* add it to the list if it's enabled. */
365 if (var_mtrr_range_is_valid(cur)) { 376 if (var_mtrr_range_is_valid(cur)) {
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
426 *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; 437 *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
427 else 438 else
428 *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; 439 *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
440
441 *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
429 } 442 }
430 443
431 return 0; 444 return 0;
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
670 } 683 }
671 684
672 if (iter.mtrr_disabled) 685 if (iter.mtrr_disabled)
673 return mtrr_disabled_type(); 686 return mtrr_disabled_type(vcpu);
674 687
675 /* not contained in any MTRRs. */ 688 /* not contained in any MTRRs. */
676 if (type == -1) 689 if (type == -1)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a1c643f9a5..899c40f826dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
3422 struct kvm_run *kvm_run = vcpu->run; 3422 struct kvm_run *kvm_run = vcpu->run;
3423 u32 exit_code = svm->vmcb->control.exit_code; 3423 u32 exit_code = svm->vmcb->control.exit_code;
3424 3424
3425 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
3426
3425 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) 3427 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
3426 vcpu->arch.cr0 = svm->vmcb->save.cr0; 3428 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3427 if (npt_enabled) 3429 if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3892 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3894 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3893 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3895 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3894 3896
3895 trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
3896
3897 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 3897 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3898 kvm_before_handle_nmi(&svm->vcpu); 3898 kvm_before_handle_nmi(&svm->vcpu);
3899 3899
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index af823a388c19..44976a596fa6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2803 msr_info->data = vcpu->arch.ia32_xss; 2803 msr_info->data = vcpu->arch.ia32_xss;
2804 break; 2804 break;
2805 case MSR_TSC_AUX: 2805 case MSR_TSC_AUX:
2806 if (!guest_cpuid_has_rdtscp(vcpu)) 2806 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
2807 return 1; 2807 return 1;
2808 /* Otherwise falls through */ 2808 /* Otherwise falls through */
2809 default: 2809 default:
@@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2909 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); 2909 clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
2910 break; 2910 break;
2911 case MSR_TSC_AUX: 2911 case MSR_TSC_AUX:
2912 if (!guest_cpuid_has_rdtscp(vcpu)) 2912 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
2913 return 1; 2913 return 1;
2914 /* Check reserved bit, higher 32 bits should be zero */ 2914 /* Check reserved bit, higher 32 bits should be zero */
2915 if ((data >> 32) != 0) 2915 if ((data >> 32) != 0)
@@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
8042 u32 exit_reason = vmx->exit_reason; 8042 u32 exit_reason = vmx->exit_reason;
8043 u32 vectoring_info = vmx->idt_vectoring_info; 8043 u32 vectoring_info = vmx->idt_vectoring_info;
8044 8044
8045 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
8046
8045 /* 8047 /*
8046 * Flush logged GPAs PML buffer, this will make dirty_bitmap more 8048 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
8047 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before 8049 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
8668 vmx->loaded_vmcs->launched = 1; 8670 vmx->loaded_vmcs->launched = 1;
8669 8671
8670 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); 8672 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
8671 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
8672 8673
8673 /* 8674 /*
8674 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if 8675 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eed32283d22c..7ffc224bbe41 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3572 3572
3573static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3573static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3574{ 3574{
3575 int i;
3575 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3576 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3576 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); 3577 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3577 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); 3578 for (i = 0; i < 3; i++)
3579 kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
3578 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3580 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3579 return 0; 3581 return 0;
3580} 3582}
@@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3593static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3595static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3594{ 3596{
3595 int start = 0; 3597 int start = 0;
3598 int i;
3596 u32 prev_legacy, cur_legacy; 3599 u32 prev_legacy, cur_legacy;
3597 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3600 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3598 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 3601 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3602 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, 3605 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3603 sizeof(kvm->arch.vpit->pit_state.channels)); 3606 sizeof(kvm->arch.vpit->pit_state.channels));
3604 kvm->arch.vpit->pit_state.flags = ps->flags; 3607 kvm->arch.vpit->pit_state.flags = ps->flags;
3605 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); 3608 for (i = 0; i < 3; i++)
3609 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
3606 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3610 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3607 return 0; 3611 return 0;
3608} 3612}
@@ -6515,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6515 if (req_immediate_exit) 6519 if (req_immediate_exit)
6516 smp_send_reschedule(vcpu->cpu); 6520 smp_send_reschedule(vcpu->cpu);
6517 6521
6522 trace_kvm_entry(vcpu->vcpu_id);
6523 wait_lapic_expire(vcpu);
6518 __kvm_guest_enter(); 6524 __kvm_guest_enter();
6519 6525
6520 if (unlikely(vcpu->arch.switch_db_regs)) { 6526 if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6527,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6527 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; 6533 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6528 } 6534 }
6529 6535
6530 trace_kvm_entry(vcpu->vcpu_id);
6531 wait_lapic_expire(vcpu);
6532 kvm_x86_ops->run(vcpu); 6536 kvm_x86_ops->run(vcpu);
6533 6537
6534 /* 6538 /*
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a035c2aa7801..0f1c6fc3ddd8 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
89 { 0/* VMALLOC_START */, "vmalloc() Area" }, 89 { 0/* VMALLOC_START */, "vmalloc() Area" },
90 { 0/*VMALLOC_END*/, "vmalloc() End" }, 90 { 0/*VMALLOC_END*/, "vmalloc() End" },
91# ifdef CONFIG_HIGHMEM 91# ifdef CONFIG_HIGHMEM
92 { 0/*PKMAP_BASE*/, "Persisent kmap() Area" }, 92 { 0/*PKMAP_BASE*/, "Persistent kmap() Area" },
93# endif 93# endif
94 { 0/*FIXADDR_START*/, "Fixmap Area" }, 94 { 0/*FIXADDR_START*/, "Fixmap Area" },
95#endif 95#endif
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 06934a8a4872..14fcd01ed992 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
211 if (err) 211 if (err)
212 return 1; 212 return 1;
213 213
214 err = convert_fxsr_from_user(&fpx, sc.fpstate); 214 err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
215 if (err) 215 if (err)
216 return 1; 216 return 1;
217 217
@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
227 { 227 {
228 struct user_i387_struct fp; 228 struct user_i387_struct fp;
229 229
230 err = copy_from_user(&fp, sc.fpstate, 230 err = copy_from_user(&fp, (void *)sc.fpstate,
231 sizeof(struct user_i387_struct)); 231 sizeof(struct user_i387_struct));
232 if (err) 232 if (err)
233 return 1; 233 return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
291#endif 291#endif
292#undef PUTREG 292#undef PUTREG
293 sc.oldmask = mask; 293 sc.oldmask = mask;
294 sc.fpstate = to_fp; 294 sc.fpstate = (unsigned long)to_fp;
295 295
296 err = copy_to_user(to, &sc, sizeof(struct sigcontext)); 296 err = copy_to_user(to, &sc, sizeof(struct sigcontext));
297 if (err) 297 if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
468 struct sigframe __user *frame = (struct sigframe __user *)(sp - 8); 468 struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
469 sigset_t set; 469 sigset_t set;
470 struct sigcontext __user *sc = &frame->sc; 470 struct sigcontext __user *sc = &frame->sc;
471 unsigned long __user *oldmask = &sc->oldmask;
472 unsigned long __user *extramask = frame->extramask;
473 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); 471 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
474 472
475 if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || 473 if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
476 copy_from_user(&set.sig[1], extramask, sig_size)) 474 copy_from_user(&set.sig[1], frame->extramask, sig_size))
477 goto segfault; 475 goto segfault;
478 476
479 set_current_blocked(&set); 477 set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
505{ 503{
506 struct rt_sigframe __user *frame; 504 struct rt_sigframe __user *frame;
507 int err = 0, sig = ksig->sig; 505 int err = 0, sig = ksig->sig;
506 unsigned long fp_to;
508 507
509 frame = (struct rt_sigframe __user *) 508 frame = (struct rt_sigframe __user *)
510 round_down(stack_top - sizeof(struct rt_sigframe), 16); 509 round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
526 err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs)); 525 err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
527 err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs, 526 err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
528 set->sig[0]); 527 set->sig[0]);
529 err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate); 528
529 fp_to = (unsigned long)&frame->fpstate;
530
531 err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
530 if (sizeof(*set) == 16) { 532 if (sizeof(*set) == 16) {
531 err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 533 err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
532 err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 534 err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ac161db63388..cb5e266a8bf7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
2495{ 2495{
2496 x86_init.paging.pagetable_init = xen_pagetable_init; 2496 x86_init.paging.pagetable_init = xen_pagetable_init;
2497 2497
2498 /* Optimization - we can use the HVM one but it has no idea which 2498 if (xen_feature(XENFEAT_auto_translated_physmap))
2499 * VCPUs are descheduled - which means that it will needlessly IPI
2500 * them. Xen knows so let it do the job.
2501 */
2502 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2503 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2504 return; 2499 return;
2505 } 2500
2506 pv_mmu_ops = xen_mmu_ops; 2501 pv_mmu_ops = xen_mmu_ops;
2507 2502
2508 memset(dummy_mapping, 0xff, PAGE_SIZE); 2503 memset(dummy_mapping, 0xff, PAGE_SIZE);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index feddabdab448..3705eabd7e22 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
68 68
69void xen_arch_pre_suspend(void) 69void xen_arch_pre_suspend(void)
70{ 70{
71 int cpu;
72
73 for_each_online_cpu(cpu)
74 xen_pmu_finish(cpu);
75
76 if (xen_pv_domain()) 71 if (xen_pv_domain())
77 xen_pv_pre_suspend(); 72 xen_pv_pre_suspend();
78} 73}
79 74
80void xen_arch_post_suspend(int cancelled) 75void xen_arch_post_suspend(int cancelled)
81{ 76{
82 int cpu;
83
84 if (xen_pv_domain()) 77 if (xen_pv_domain())
85 xen_pv_post_suspend(cancelled); 78 xen_pv_post_suspend(cancelled);
86 else 79 else
87 xen_hvm_post_suspend(cancelled); 80 xen_hvm_post_suspend(cancelled);
88
89 for_each_online_cpu(cpu)
90 xen_pmu_init(cpu);
91} 81}
92 82
93static void xen_vcpu_notify_restore(void *data) 83static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
106 96
107void xen_arch_resume(void) 97void xen_arch_resume(void)
108{ 98{
99 int cpu;
100
109 on_each_cpu(xen_vcpu_notify_restore, NULL, 1); 101 on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
102
103 for_each_online_cpu(cpu)
104 xen_pmu_init(cpu);
110} 105}
111 106
112void xen_arch_suspend(void) 107void xen_arch_suspend(void)
113{ 108{
109 int cpu;
110
111 for_each_online_cpu(cpu)
112 xen_pmu_finish(cpu);
113
114 on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); 114 on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
115} 115}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5bcdfc10c23a..5a37188b559f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
1127 * of the main cic data structures. For now we allow a task to change 1127 * of the main cic data structures. For now we allow a task to change
1128 * its cgroup only if it's the only owner of its ioc. 1128 * its cgroup only if it's the only owner of its ioc.
1129 */ 1129 */
1130static int blkcg_can_attach(struct cgroup_subsys_state *css, 1130static int blkcg_can_attach(struct cgroup_taskset *tset)
1131 struct cgroup_taskset *tset)
1132{ 1131{
1133 struct task_struct *task; 1132 struct task_struct *task;
1133 struct cgroup_subsys_state *dst_css;
1134 struct io_context *ioc; 1134 struct io_context *ioc;
1135 int ret = 0; 1135 int ret = 0;
1136 1136
1137 /* task_lock() is needed to avoid races with exit_io_context() */ 1137 /* task_lock() is needed to avoid races with exit_io_context() */
1138 cgroup_taskset_for_each(task, tset) { 1138 cgroup_taskset_for_each(task, dst_css, tset) {
1139 task_lock(task); 1139 task_lock(task);
1140 ioc = task->io_context; 1140 ioc = task->io_context;
1141 if (ioc && atomic_read(&ioc->nr_tasks) > 1) 1141 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
diff --git a/block/blk-core.c b/block/blk-core.c
index a0af4043dda2..c487b94c59e3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1689,8 +1689,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1689 struct request *req; 1689 struct request *req;
1690 unsigned int request_count = 0; 1690 unsigned int request_count = 0;
1691 1691
1692 blk_queue_split(q, &bio, q->bio_split);
1693
1694 /* 1692 /*
1695 * low level driver can indicate that it wants pages above a 1693 * low level driver can indicate that it wants pages above a
1696 * certain limit bounced to low memory (ie for highmem, or even 1694 * certain limit bounced to low memory (ie for highmem, or even
@@ -1698,6 +1696,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1698 */ 1696 */
1699 blk_queue_bounce(q, &bio); 1697 blk_queue_bounce(q, &bio);
1700 1698
1699 blk_queue_split(q, &bio, q->bio_split);
1700
1701 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1701 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1702 bio->bi_error = -EIO; 1702 bio->bi_error = -EIO;
1703 bio_endio(bio); 1703 bio_endio(bio);
@@ -3405,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
3405{ 3405{
3406 int ret = 0; 3406 int ret = 0;
3407 3407
3408 if (!q->dev)
3409 return ret;
3410
3408 spin_lock_irq(q->queue_lock); 3411 spin_lock_irq(q->queue_lock);
3409 if (q->nr_pending) { 3412 if (q->nr_pending) {
3410 ret = -EBUSY; 3413 ret = -EBUSY;
@@ -3432,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
3432 */ 3435 */
3433void blk_post_runtime_suspend(struct request_queue *q, int err) 3436void blk_post_runtime_suspend(struct request_queue *q, int err)
3434{ 3437{
3438 if (!q->dev)
3439 return;
3440
3435 spin_lock_irq(q->queue_lock); 3441 spin_lock_irq(q->queue_lock);
3436 if (!err) { 3442 if (!err) {
3437 q->rpm_status = RPM_SUSPENDED; 3443 q->rpm_status = RPM_SUSPENDED;
@@ -3456,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
3456 */ 3462 */
3457void blk_pre_runtime_resume(struct request_queue *q) 3463void blk_pre_runtime_resume(struct request_queue *q)
3458{ 3464{
3465 if (!q->dev)
3466 return;
3467
3459 spin_lock_irq(q->queue_lock); 3468 spin_lock_irq(q->queue_lock);
3460 q->rpm_status = RPM_RESUMING; 3469 q->rpm_status = RPM_RESUMING;
3461 spin_unlock_irq(q->queue_lock); 3470 spin_unlock_irq(q->queue_lock);
@@ -3478,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
3478 */ 3487 */
3479void blk_post_runtime_resume(struct request_queue *q, int err) 3488void blk_post_runtime_resume(struct request_queue *q, int err)
3480{ 3489{
3490 if (!q->dev)
3491 return;
3492
3481 spin_lock_irq(q->queue_lock); 3493 spin_lock_irq(q->queue_lock);
3482 if (!err) { 3494 if (!err) {
3483 q->rpm_status = RPM_ACTIVE; 3495 q->rpm_status = RPM_ACTIVE;
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index b4ffc5be1a93..e5b5721809e2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
277 if (WARN_ON_ONCE(in_irq())) 277 if (WARN_ON_ONCE(in_irq()))
278 return -EDEADLK; 278 return -EDEADLK;
279 279
280 walk->iv = req->info;
280 walk->nbytes = walk->total; 281 walk->nbytes = walk->total;
281 if (unlikely(!walk->total)) 282 if (unlikely(!walk->total))
282 return 0; 283 return 0;
283 284
284 walk->iv_buffer = NULL; 285 walk->iv_buffer = NULL;
285 walk->iv = req->info;
286 if (unlikely(((unsigned long)walk->iv & alignmask))) { 286 if (unlikely(((unsigned long)walk->iv & alignmask))) {
287 int err = ablkcipher_copy_iv(walk, tfm, alignmask); 287 int err = ablkcipher_copy_iv(walk, tfm, alignmask);
288 288
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 11b981492031..8cc1622b2ee0 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
326 if (WARN_ON_ONCE(in_irq())) 326 if (WARN_ON_ONCE(in_irq()))
327 return -EDEADLK; 327 return -EDEADLK;
328 328
329 walk->iv = desc->info;
329 walk->nbytes = walk->total; 330 walk->nbytes = walk->total;
330 if (unlikely(!walk->total)) 331 if (unlikely(!walk->total))
331 return 0; 332 return 0;
332 333
333 walk->buffer = NULL; 334 walk->buffer = NULL;
334 walk->iv = desc->info;
335 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { 335 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
336 int err = blkcipher_copy_iv(walk); 336 int err = blkcipher_copy_iv(walk);
337 if (err) 337 if (err)
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index e7ed39bab97d..aa45d4802707 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
1810 if (!dev->driver) { 1810 if (!dev->driver) {
1811 /* dev->driver may be null if we're being removed */ 1811 /* dev->driver may be null if we're being removed */
1812 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 1812 dev_dbg(dev, "%s: no driver found for dev\n", __func__);
1813 return; 1813 goto out_unlock;
1814 } 1814 }
1815 1815
1816 if (!acpi_desc) { 1816 if (!acpi_desc) {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ff02bb4218fc..cdfbcc54821f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
314 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */ 314 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
315 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */ 315 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
316 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */ 316 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
317 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
318 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
319 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
320 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
321 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
322 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
323 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
324 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
325 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
326 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
327 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ 317 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
328 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ 318 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
329 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ 319 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
350 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ 340 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
351 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ 341 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
352 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ 342 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
343 { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
353 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ 344 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
354 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ 345 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
346 { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
355 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ 347 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
356 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ 348 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
349 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
350 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
351 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
352 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
353 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
354 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
355 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
356 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
357 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
358 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
357 359
358 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 360 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
359 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 361 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 8490d37aee2a..f7a7fa81740e 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
62 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); 62 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
63} 63}
64 64
65#ifdef CONFIG_PM_SLEEP
65static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) 66static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
66{ 67{
67 return ahci_platform_suspend_host(&pdev->dev); 68 return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
81 82
82 return ahci_platform_resume_host(&pdev->dev); 83 return ahci_platform_resume_host(&pdev->dev);
83} 84}
85#else
86#define ahci_mvebu_suspend NULL
87#define ahci_mvebu_resume NULL
88#endif
84 89
85static const struct ata_port_info ahci_mvebu_port_info = { 90static const struct ata_port_info ahci_mvebu_port_info = {
86 .flags = AHCI_FLAG_COMMON, 91 .flags = AHCI_FLAG_COMMON,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 096064cd6c52..4665512dae44 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1273 ata_tf_to_fis(tf, pmp, is_cmd, fis); 1273 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1274 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); 1274 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1275 1275
1276 /* set port value for softreset of Port Multiplier */
1277 if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
1278 tmp = readl(port_mmio + PORT_FBS);
1279 tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
1280 tmp |= pmp << PORT_FBS_DEV_OFFSET;
1281 writel(tmp, port_mmio + PORT_FBS);
1282 pp->fbs_last_dev = pmp;
1283 }
1284
1276 /* issue & wait */ 1285 /* issue & wait */
1277 writel(1, port_mmio + PORT_CMD_ISSUE); 1286 writel(1, port_mmio + PORT_CMD_ISSUE);
1278 1287
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index cb0508af1459..961acc788f44 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
1505unsigned int ata_read_log_page(struct ata_device *dev, u8 log, 1505unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1506 u8 page, void *buf, unsigned int sectors) 1506 u8 page, void *buf, unsigned int sectors)
1507{ 1507{
1508 unsigned long ap_flags = dev->link->ap->flags;
1508 struct ata_taskfile tf; 1509 struct ata_taskfile tf;
1509 unsigned int err_mask; 1510 unsigned int err_mask;
1510 bool dma = false; 1511 bool dma = false;
1511 1512
1512 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); 1513 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1513 1514
1515 /*
1516 * Return error without actually issuing the command on controllers
1517 * which e.g. lockup on a read log page.
1518 */
1519 if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
1520 return AC_ERR_DEV;
1521
1514retry: 1522retry:
1515 ata_tf_init(dev, &tf); 1523 ata_tf_init(dev, &tf);
1516 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && 1524 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 5389579c5120..a723ae929783 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -45,7 +45,8 @@ enum {
45 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */ 45 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
46 46
47 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | 47 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
48 ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN), 48 ATA_FLAG_PMP | ATA_FLAG_NCQ |
49 ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
49 50
50 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, 51 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
51 SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ 52 SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index dea6edcbf145..29bcff086bce 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
630 unsigned int n, quirks = 0; 630 unsigned int n, quirks = 0;
631 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 631 unsigned char model_num[ATA_ID_PROD_LEN + 1];
632 632
633 /* This controller doesn't support trim */
634 dev->horkage |= ATA_HORKAGE_NOTRIM;
635
633 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 636 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
634 637
635 for (n = 0; sil_blacklist[n].product; n++) 638 for (n = 0; sil_blacklist[n].product; n++)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2804aed3f416..25425d3f2575 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
303 if (mem->state == MEM_OFFLINE) 303 if (mem->state == MEM_OFFLINE)
304 return 0; 304 return 0;
305 305
306 /* Can't offline block with non-present sections */
307 if (mem->section_count != sections_per_block)
308 return -EINVAL;
309
306 return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); 310 return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
307} 311}
308 312
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 167418e73445..65f50eccd49b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
390 struct generic_pm_domain *genpd; 390 struct generic_pm_domain *genpd;
391 bool (*stop_ok)(struct device *__dev); 391 bool (*stop_ok)(struct device *__dev);
392 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 392 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
393 bool runtime_pm = pm_runtime_enabled(dev);
393 ktime_t time_start; 394 ktime_t time_start;
394 s64 elapsed_ns; 395 s64 elapsed_ns;
395 int ret; 396 int ret;
@@ -400,12 +401,19 @@ static int pm_genpd_runtime_suspend(struct device *dev)
400 if (IS_ERR(genpd)) 401 if (IS_ERR(genpd))
401 return -EINVAL; 402 return -EINVAL;
402 403
404 /*
405 * A runtime PM centric subsystem/driver may re-use the runtime PM
406 * callbacks for other purposes than runtime PM. In those scenarios
407 * runtime PM is disabled. Under these circumstances, we shall skip
408 * validating/measuring the PM QoS latency.
409 */
403 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 410 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
404 if (stop_ok && !stop_ok(dev)) 411 if (runtime_pm && stop_ok && !stop_ok(dev))
405 return -EBUSY; 412 return -EBUSY;
406 413
407 /* Measure suspend latency. */ 414 /* Measure suspend latency. */
408 time_start = ktime_get(); 415 if (runtime_pm)
416 time_start = ktime_get();
409 417
410 ret = genpd_save_dev(genpd, dev); 418 ret = genpd_save_dev(genpd, dev);
411 if (ret) 419 if (ret)
@@ -418,13 +426,15 @@ static int pm_genpd_runtime_suspend(struct device *dev)
418 } 426 }
419 427
420 /* Update suspend latency value if the measured time exceeds it. */ 428 /* Update suspend latency value if the measured time exceeds it. */
421 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 429 if (runtime_pm) {
422 if (elapsed_ns > td->suspend_latency_ns) { 430 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
423 td->suspend_latency_ns = elapsed_ns; 431 if (elapsed_ns > td->suspend_latency_ns) {
424 dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 432 td->suspend_latency_ns = elapsed_ns;
425 elapsed_ns); 433 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
426 genpd->max_off_time_changed = true; 434 elapsed_ns);
427 td->constraint_changed = true; 435 genpd->max_off_time_changed = true;
436 td->constraint_changed = true;
437 }
428 } 438 }
429 439
430 /* 440 /*
@@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
453{ 463{
454 struct generic_pm_domain *genpd; 464 struct generic_pm_domain *genpd;
455 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 465 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
466 bool runtime_pm = pm_runtime_enabled(dev);
456 ktime_t time_start; 467 ktime_t time_start;
457 s64 elapsed_ns; 468 s64 elapsed_ns;
458 int ret; 469 int ret;
@@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
479 490
480 out: 491 out:
481 /* Measure resume latency. */ 492 /* Measure resume latency. */
482 if (timed) 493 if (timed && runtime_pm)
483 time_start = ktime_get(); 494 time_start = ktime_get();
484 495
485 genpd_start_dev(genpd, dev); 496 genpd_start_dev(genpd, dev);
486 genpd_restore_dev(genpd, dev); 497 genpd_restore_dev(genpd, dev);
487 498
488 /* Update resume latency value if the measured time exceeds it. */ 499 /* Update resume latency value if the measured time exceeds it. */
489 if (timed) { 500 if (timed && runtime_pm) {
490 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 501 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
491 if (elapsed_ns > td->resume_latency_ns) { 502 if (elapsed_ns > td->resume_latency_ns) {
492 td->resume_latency_ns = elapsed_ns; 503 td->resume_latency_ns = elapsed_ns;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 0c3940ec5e62..a428e4ef71fd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd)
219{ 219{
220 struct request_queue *q = NULL; 220 struct request_queue *q = NULL;
221 221
222 if (cmd->rq)
223 q = cmd->rq->q;
224
222 switch (queue_mode) { 225 switch (queue_mode) {
223 case NULL_Q_MQ: 226 case NULL_Q_MQ:
224 blk_mq_end_request(cmd->rq, 0); 227 blk_mq_end_request(cmd->rq, 0);
@@ -232,9 +235,6 @@ static void end_cmd(struct nullb_cmd *cmd)
232 goto free_cmd; 235 goto free_cmd;
233 } 236 }
234 237
235 if (cmd->rq)
236 q = cmd->rq->q;
237
238 /* Restart queue if needed, as we are freeing a tag */ 238 /* Restart queue if needed, as we are freeing a tag */
239 if (q && !q->mq_ops && blk_queue_stopped(q)) { 239 if (q && !q->mq_ops && blk_queue_stopped(q)) {
240 unsigned long flags; 240 unsigned long flags;
@@ -444,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error)
444 blk_put_request(rq); 444 blk_put_request(rq);
445} 445}
446 446
447static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) 447static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
448{ 448{
449 struct request_queue *q = dev->q;
449 struct request *rq; 450 struct request *rq;
450 struct bio *bio = rqd->bio; 451 struct bio *bio = rqd->bio;
451 452
@@ -470,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
470 return 0; 471 return 0;
471} 472}
472 473
473static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) 474static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
474{ 475{
475 sector_t size = gb * 1024 * 1024 * 1024ULL; 476 sector_t size = gb * 1024 * 1024 * 1024ULL;
476 sector_t blksize; 477 sector_t blksize;
@@ -523,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
523 return 0; 524 return 0;
524} 525}
525 526
526static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) 527static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
527{ 528{
528 mempool_t *virtmem_pool; 529 mempool_t *virtmem_pool;
529 530
@@ -541,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
541 mempool_destroy(pool); 542 mempool_destroy(pool);
542} 543}
543 544
544static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, 545static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
545 gfp_t mem_flags, dma_addr_t *dma_handler) 546 gfp_t mem_flags, dma_addr_t *dma_handler)
546{ 547{
547 return mempool_alloc(pool, mem_flags); 548 return mempool_alloc(pool, mem_flags);
@@ -765,7 +766,9 @@ out:
765 766
766static int __init null_init(void) 767static int __init null_init(void)
767{ 768{
769 int ret = 0;
768 unsigned int i; 770 unsigned int i;
771 struct nullb *nullb;
769 772
770 if (bs > PAGE_SIZE) { 773 if (bs > PAGE_SIZE) {
771 pr_warn("null_blk: invalid block size\n"); 774 pr_warn("null_blk: invalid block size\n");
@@ -807,22 +810,29 @@ static int __init null_init(void)
807 0, 0, NULL); 810 0, 0, NULL);
808 if (!ppa_cache) { 811 if (!ppa_cache) {
809 pr_err("null_blk: unable to create ppa cache\n"); 812 pr_err("null_blk: unable to create ppa cache\n");
810 return -ENOMEM; 813 ret = -ENOMEM;
814 goto err_ppa;
811 } 815 }
812 } 816 }
813 817
814 for (i = 0; i < nr_devices; i++) { 818 for (i = 0; i < nr_devices; i++) {
815 if (null_add_dev()) { 819 ret = null_add_dev();
816 unregister_blkdev(null_major, "nullb"); 820 if (ret)
817 goto err_ppa; 821 goto err_dev;
818 }
819 } 822 }
820 823
821 pr_info("null: module loaded\n"); 824 pr_info("null: module loaded\n");
822 return 0; 825 return 0;
823err_ppa: 826
827err_dev:
828 while (!list_empty(&nullb_list)) {
829 nullb = list_entry(nullb_list.next, struct nullb, list);
830 null_del_dev(nullb);
831 }
824 kmem_cache_destroy(ppa_cache); 832 kmem_cache_destroy(ppa_cache);
825 return -EINVAL; 833err_ppa:
834 unregister_blkdev(null_major, "nullb");
835 return ret;
826} 836}
827 837
828static void __exit null_exit(void) 838static void __exit null_exit(void)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index f9099940c272..41fb1a917b17 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
950 goto unmap; 950 goto unmap;
951 951
952 for (n = 0, i = 0; n < nseg; n++) { 952 for (n = 0, i = 0; n < nseg; n++) {
953 uint8_t first_sect, last_sect;
954
953 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { 955 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
954 /* Map indirect segments */ 956 /* Map indirect segments */
955 if (segments) 957 if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
957 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); 959 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
958 } 960 }
959 i = n % SEGS_PER_INDIRECT_FRAME; 961 i = n % SEGS_PER_INDIRECT_FRAME;
962
960 pending_req->segments[n]->gref = segments[i].gref; 963 pending_req->segments[n]->gref = segments[i].gref;
961 seg[n].nsec = segments[i].last_sect - 964
962 segments[i].first_sect + 1; 965 first_sect = READ_ONCE(segments[i].first_sect);
963 seg[n].offset = (segments[i].first_sect << 9); 966 last_sect = READ_ONCE(segments[i].last_sect);
964 if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) || 967 if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
965 (segments[i].last_sect < segments[i].first_sect)) {
966 rc = -EINVAL; 968 rc = -EINVAL;
967 goto unmap; 969 goto unmap;
968 } 970 }
971
972 seg[n].nsec = last_sect - first_sect + 1;
973 seg[n].offset = first_sect << 9;
969 preq->nr_sects += seg[n].nsec; 974 preq->nr_sects += seg[n].nsec;
970 } 975 }
971 976
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 68e87a037b99..c929ae22764c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
408 struct blkif_x86_32_request *src) 408 struct blkif_x86_32_request *src)
409{ 409{
410 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 410 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
411 dst->operation = src->operation; 411 dst->operation = READ_ONCE(src->operation);
412 switch (src->operation) { 412 switch (dst->operation) {
413 case BLKIF_OP_READ: 413 case BLKIF_OP_READ:
414 case BLKIF_OP_WRITE: 414 case BLKIF_OP_WRITE:
415 case BLKIF_OP_WRITE_BARRIER: 415 case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
456 struct blkif_x86_64_request *src) 456 struct blkif_x86_64_request *src)
457{ 457{
458 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 458 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
459 dst->operation = src->operation; 459 dst->operation = READ_ONCE(src->operation);
460 switch (src->operation) { 460 switch (dst->operation) {
461 case BLKIF_OP_READ: 461 case BLKIF_OP_READ:
462 case BLKIF_OP_WRITE: 462 case BLKIF_OP_WRITE:
463 case BLKIF_OP_WRITE_BARRIER: 463 case BLKIF_OP_WRITE_BARRIER:
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 55fe9020459f..4cc72fa017c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1230,14 +1230,14 @@ static int smi_start_processing(void *send_info,
1230 1230
1231 new_smi->intf = intf; 1231 new_smi->intf = intf;
1232 1232
1233 /* Try to claim any interrupts. */
1234 if (new_smi->irq_setup)
1235 new_smi->irq_setup(new_smi);
1236
1237 /* Set up the timer that drives the interface. */ 1233 /* Set up the timer that drives the interface. */
1238 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); 1234 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1239 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); 1235 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1240 1236
1237 /* Try to claim any interrupts. */
1238 if (new_smi->irq_setup)
1239 new_smi->irq_setup(new_smi);
1240
1241 /* 1241 /*
1242 * Check if the user forcefully enabled the daemon. 1242 * Check if the user forcefully enabled the daemon.
1243 */ 1243 */
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 10819e248414..335322dc403f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -209,6 +209,8 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
209 209
210struct clk_gpio_delayed_register_data { 210struct clk_gpio_delayed_register_data {
211 const char *gpio_name; 211 const char *gpio_name;
212 int num_parents;
213 const char **parent_names;
212 struct device_node *node; 214 struct device_node *node;
213 struct mutex lock; 215 struct mutex lock;
214 struct clk *clk; 216 struct clk *clk;
@@ -222,8 +224,6 @@ static struct clk *of_clk_gpio_delayed_register_get(
222{ 224{
223 struct clk_gpio_delayed_register_data *data = _data; 225 struct clk_gpio_delayed_register_data *data = _data;
224 struct clk *clk; 226 struct clk *clk;
225 const char **parent_names;
226 int i, num_parents;
227 int gpio; 227 int gpio;
228 enum of_gpio_flags of_flags; 228 enum of_gpio_flags of_flags;
229 229
@@ -248,26 +248,14 @@ static struct clk *of_clk_gpio_delayed_register_get(
248 return ERR_PTR(gpio); 248 return ERR_PTR(gpio);
249 } 249 }
250 250
251 num_parents = of_clk_get_parent_count(data->node); 251 clk = data->clk_register_get(data->node->name, data->parent_names,
252 252 data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
253 parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
254 if (!parent_names) {
255 clk = ERR_PTR(-ENOMEM);
256 goto out;
257 }
258
259 for (i = 0; i < num_parents; i++)
260 parent_names[i] = of_clk_get_parent_name(data->node, i);
261
262 clk = data->clk_register_get(data->node->name, parent_names,
263 num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
264 if (IS_ERR(clk)) 253 if (IS_ERR(clk))
265 goto out; 254 goto out;
266 255
267 data->clk = clk; 256 data->clk = clk;
268out: 257out:
269 mutex_unlock(&data->lock); 258 mutex_unlock(&data->lock);
270 kfree(parent_names);
271 259
272 return clk; 260 return clk;
273} 261}
@@ -296,11 +284,24 @@ static void __init of_gpio_clk_setup(struct device_node *node,
296 unsigned gpio, bool active_low)) 284 unsigned gpio, bool active_low))
297{ 285{
298 struct clk_gpio_delayed_register_data *data; 286 struct clk_gpio_delayed_register_data *data;
287 const char **parent_names;
288 int i, num_parents;
299 289
300 data = kzalloc(sizeof(*data), GFP_KERNEL); 290 data = kzalloc(sizeof(*data), GFP_KERNEL);
301 if (!data) 291 if (!data)
302 return; 292 return;
303 293
294 num_parents = of_clk_get_parent_count(node);
295
296 parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
297 if (!parent_names)
298 return;
299
300 for (i = 0; i < num_parents; i++)
301 parent_names[i] = of_clk_get_parent_name(node, i);
302
303 data->num_parents = num_parents;
304 data->parent_names = parent_names;
304 data->node = node; 305 data->node = node;
305 data->gpio_name = gpio_name; 306 data->gpio_name = gpio_name;
306 data->clk_register_get = clk_register_get; 307 data->clk_register_get = clk_register_get;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 1ab0fb81c6a0..7bc1c4527ae4 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -778,8 +778,10 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
778 */ 778 */
779 clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; 779 clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
780 div = get_pll_div(cg, hwc, clksel); 780 div = get_pll_div(cg, hwc, clksel);
781 if (!div) 781 if (!div) {
782 kfree(hwc);
782 return NULL; 783 return NULL;
784 }
783 785
784 pct80_rate = clk_get_rate(div->clk); 786 pct80_rate = clk_get_rate(div->clk);
785 pct80_rate *= 8; 787 pct80_rate *= 8;
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 0b501a9fef92..cd0f2726f5e0 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -292,6 +292,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
292 ret = scpi_clk_add(dev, child, match); 292 ret = scpi_clk_add(dev, child, match);
293 if (ret) { 293 if (ret) {
294 scpi_clocks_remove(pdev); 294 scpi_clocks_remove(pdev);
295 of_node_put(child);
295 return ret; 296 return ret;
296 } 297 }
297 } 298 }
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 8564e4342c7d..82fe3662b5f6 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -52,7 +52,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
52 unsigned long parent_rate) 52 unsigned long parent_rate)
53{ 53{
54 struct clk_pllv1 *pll = to_clk_pllv1(hw); 54 struct clk_pllv1 *pll = to_clk_pllv1(hw);
55 long long ll; 55 unsigned long long ull;
56 int mfn_abs; 56 int mfn_abs;
57 unsigned int mfi, mfn, mfd, pd; 57 unsigned int mfi, mfn, mfd, pd;
58 u32 reg; 58 u32 reg;
@@ -94,16 +94,16 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
94 rate = parent_rate * 2; 94 rate = parent_rate * 2;
95 rate /= pd + 1; 95 rate /= pd + 1;
96 96
97 ll = (unsigned long long)rate * mfn_abs; 97 ull = (unsigned long long)rate * mfn_abs;
98 98
99 do_div(ll, mfd + 1); 99 do_div(ull, mfd + 1);
100 100
101 if (mfn_is_negative(pll, mfn)) 101 if (mfn_is_negative(pll, mfn))
102 ll = -ll; 102 ull = (rate * mfi) - ull;
103 else
104 ull = (rate * mfi) + ull;
103 105
104 ll = (rate * mfi) + ll; 106 return ull;
105
106 return ll;
107} 107}
108 108
109static struct clk_ops clk_pllv1_ops = { 109static struct clk_ops clk_pllv1_ops = {
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index b18f875eac6a..4aeda56ce372 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -79,7 +79,7 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
79{ 79{
80 long mfi, mfn, mfd, pdf, ref_clk; 80 long mfi, mfn, mfd, pdf, ref_clk;
81 unsigned long dbl; 81 unsigned long dbl;
82 s64 temp; 82 u64 temp;
83 83
84 dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN; 84 dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
85 85
@@ -98,8 +98,9 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
98 temp = (u64) ref_clk * abs(mfn); 98 temp = (u64) ref_clk * abs(mfn);
99 do_div(temp, mfd + 1); 99 do_div(temp, mfd + 1);
100 if (mfn < 0) 100 if (mfn < 0)
101 temp = -temp; 101 temp = (ref_clk * mfi) - temp;
102 temp = (ref_clk * mfi) + temp; 102 else
103 temp = (ref_clk * mfi) + temp;
103 104
104 return temp; 105 return temp;
105} 106}
@@ -126,7 +127,7 @@ static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
126{ 127{
127 u32 reg; 128 u32 reg;
128 long mfi, pdf, mfn, mfd = 999999; 129 long mfi, pdf, mfn, mfd = 999999;
129 s64 temp64; 130 u64 temp64;
130 unsigned long quad_parent_rate; 131 unsigned long quad_parent_rate;
131 132
132 quad_parent_rate = 4 * parent_rate; 133 quad_parent_rate = 4 * parent_rate;
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index d1b1c95177bb..0a94d9661d91 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -335,22 +335,22 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
335 clk[VF610_CLK_SAI0_SEL] = imx_clk_mux("sai0_sel", CCM_CSCMR1, 0, 2, sai_sels, 4); 335 clk[VF610_CLK_SAI0_SEL] = imx_clk_mux("sai0_sel", CCM_CSCMR1, 0, 2, sai_sels, 4);
336 clk[VF610_CLK_SAI0_EN] = imx_clk_gate("sai0_en", "sai0_sel", CCM_CSCDR1, 16); 336 clk[VF610_CLK_SAI0_EN] = imx_clk_gate("sai0_en", "sai0_sel", CCM_CSCDR1, 16);
337 clk[VF610_CLK_SAI0_DIV] = imx_clk_divider("sai0_div", "sai0_en", CCM_CSCDR1, 0, 4); 337 clk[VF610_CLK_SAI0_DIV] = imx_clk_divider("sai0_div", "sai0_en", CCM_CSCDR1, 0, 4);
338 clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "sai0_div", CCM_CCGR0, CCM_CCGRx_CGn(15)); 338 clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(15));
339 339
340 clk[VF610_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", CCM_CSCMR1, 2, 2, sai_sels, 4); 340 clk[VF610_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", CCM_CSCMR1, 2, 2, sai_sels, 4);
341 clk[VF610_CLK_SAI1_EN] = imx_clk_gate("sai1_en", "sai1_sel", CCM_CSCDR1, 17); 341 clk[VF610_CLK_SAI1_EN] = imx_clk_gate("sai1_en", "sai1_sel", CCM_CSCDR1, 17);
342 clk[VF610_CLK_SAI1_DIV] = imx_clk_divider("sai1_div", "sai1_en", CCM_CSCDR1, 4, 4); 342 clk[VF610_CLK_SAI1_DIV] = imx_clk_divider("sai1_div", "sai1_en", CCM_CSCDR1, 4, 4);
343 clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "sai1_div", CCM_CCGR1, CCM_CCGRx_CGn(0)); 343 clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(0));
344 344
345 clk[VF610_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", CCM_CSCMR1, 4, 2, sai_sels, 4); 345 clk[VF610_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", CCM_CSCMR1, 4, 2, sai_sels, 4);
346 clk[VF610_CLK_SAI2_EN] = imx_clk_gate("sai2_en", "sai2_sel", CCM_CSCDR1, 18); 346 clk[VF610_CLK_SAI2_EN] = imx_clk_gate("sai2_en", "sai2_sel", CCM_CSCDR1, 18);
347 clk[VF610_CLK_SAI2_DIV] = imx_clk_divider("sai2_div", "sai2_en", CCM_CSCDR1, 8, 4); 347 clk[VF610_CLK_SAI2_DIV] = imx_clk_divider("sai2_div", "sai2_en", CCM_CSCDR1, 8, 4);
348 clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "sai2_div", CCM_CCGR1, CCM_CCGRx_CGn(1)); 348 clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(1));
349 349
350 clk[VF610_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", CCM_CSCMR1, 6, 2, sai_sels, 4); 350 clk[VF610_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", CCM_CSCMR1, 6, 2, sai_sels, 4);
351 clk[VF610_CLK_SAI3_EN] = imx_clk_gate("sai3_en", "sai3_sel", CCM_CSCDR1, 19); 351 clk[VF610_CLK_SAI3_EN] = imx_clk_gate("sai3_en", "sai3_sel", CCM_CSCDR1, 19);
352 clk[VF610_CLK_SAI3_DIV] = imx_clk_divider("sai3_div", "sai3_en", CCM_CSCDR1, 12, 4); 352 clk[VF610_CLK_SAI3_DIV] = imx_clk_divider("sai3_div", "sai3_en", CCM_CSCDR1, 12, 4);
353 clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "sai3_div", CCM_CCGR1, CCM_CCGRx_CGn(2)); 353 clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(2));
354 354
355 clk[VF610_CLK_NFC_SEL] = imx_clk_mux("nfc_sel", CCM_CSCMR1, 12, 2, nfc_sels, 4); 355 clk[VF610_CLK_NFC_SEL] = imx_clk_mux("nfc_sel", CCM_CSCMR1, 12, 2, nfc_sels, 4);
356 clk[VF610_CLK_NFC_EN] = imx_clk_gate("nfc_en", "nfc_sel", CCM_CSCDR2, 9); 356 clk[VF610_CLK_NFC_EN] = imx_clk_gate("nfc_en", "nfc_sel", CCM_CSCDR2, 9);
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 09d2832fbd78..71fd29348f28 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -9,6 +9,7 @@
9 * warranty of any kind, whether express or implied. 9 * warranty of any kind, whether express or implied.
10 */ 10 */
11 11
12#include <linux/clk.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 93e967c0f972..75244915df05 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -9,6 +9,7 @@
9 * warranty of any kind, whether express or implied. 9 * warranty of any kind, whether express or implied.
10 */ 10 */
11 11
12#include <linux/clk.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 993abcdb32cc..37ba04ba1368 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -9,6 +9,7 @@
9 * warranty of any kind, whether express or implied. 9 * warranty of any kind, whether express or implied.
10 */ 10 */
11 11
12#include <linux/clk.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 5484c31ec568..0ee1f363e4be 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -41,15 +41,10 @@
41 41
42#define SUN4I_PLL2_OUTPUTS 4 42#define SUN4I_PLL2_OUTPUTS 4
43 43
44struct sun4i_pll2_data {
45 u32 post_div_offset;
46 u32 pre_div_flags;
47};
48
49static DEFINE_SPINLOCK(sun4i_a10_pll2_lock); 44static DEFINE_SPINLOCK(sun4i_a10_pll2_lock);
50 45
51static void __init sun4i_pll2_setup(struct device_node *node, 46static void __init sun4i_pll2_setup(struct device_node *node,
52 struct sun4i_pll2_data *data) 47 int post_div_offset)
53{ 48{
54 const char *clk_name = node->name, *parent; 49 const char *clk_name = node->name, *parent;
55 struct clk **clks, *base_clk, *prediv_clk; 50 struct clk **clks, *base_clk, *prediv_clk;
@@ -76,7 +71,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
76 parent, 0, reg, 71 parent, 0, reg,
77 SUN4I_PLL2_PRE_DIV_SHIFT, 72 SUN4I_PLL2_PRE_DIV_SHIFT,
78 SUN4I_PLL2_PRE_DIV_WIDTH, 73 SUN4I_PLL2_PRE_DIV_WIDTH,
79 data->pre_div_flags, 74 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
80 &sun4i_a10_pll2_lock); 75 &sun4i_a10_pll2_lock);
81 if (!prediv_clk) { 76 if (!prediv_clk) {
82 pr_err("Couldn't register the prediv clock\n"); 77 pr_err("Couldn't register the prediv clock\n");
@@ -127,7 +122,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
127 */ 122 */
128 val = readl(reg); 123 val = readl(reg);
129 val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT); 124 val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
130 val |= (SUN4I_PLL2_POST_DIV_VALUE - data->post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT; 125 val |= (SUN4I_PLL2_POST_DIV_VALUE - post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
131 writel(val, reg); 126 writel(val, reg);
132 127
133 of_property_read_string_index(node, "clock-output-names", 128 of_property_read_string_index(node, "clock-output-names",
@@ -191,25 +186,17 @@ err_unmap:
191 iounmap(reg); 186 iounmap(reg);
192} 187}
193 188
194static struct sun4i_pll2_data sun4i_a10_pll2_data = {
195 .pre_div_flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
196};
197
198static void __init sun4i_a10_pll2_setup(struct device_node *node) 189static void __init sun4i_a10_pll2_setup(struct device_node *node)
199{ 190{
200 sun4i_pll2_setup(node, &sun4i_a10_pll2_data); 191 sun4i_pll2_setup(node, 0);
201} 192}
202 193
203CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk", 194CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk",
204 sun4i_a10_pll2_setup); 195 sun4i_a10_pll2_setup);
205 196
206static struct sun4i_pll2_data sun5i_a13_pll2_data = {
207 .post_div_offset = 1,
208};
209
210static void __init sun5i_a13_pll2_setup(struct device_node *node) 197static void __init sun5i_a13_pll2_setup(struct device_node *node)
211{ 198{
212 sun4i_pll2_setup(node, &sun5i_a13_pll2_data); 199 sun4i_pll2_setup(node, 1);
213} 200}
214 201
215CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk", 202CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk",
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 1dfad0c712cd..2a5d84fdddc5 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -20,6 +20,8 @@ static struct ti_dt_clk dm816x_clks[] = {
20 DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"), 20 DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
21 DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), 21 DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
22 DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"), 22 DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
23 DT_CLK(NULL, "timer_32k_ck", "sysclk18_ck"),
24 DT_CLK(NULL, "timer_ext_ck", "tclkin_ck"),
23 DT_CLK(NULL, "mpu_ck", "mpu_ck"), 25 DT_CLK(NULL, "mpu_ck", "mpu_ck"),
24 DT_CLK(NULL, "timer1_fck", "timer1_fck"), 26 DT_CLK(NULL, "timer1_fck", "timer1_fck"),
25 DT_CLK(NULL, "timer2_fck", "timer2_fck"), 27 DT_CLK(NULL, "timer2_fck", "timer2_fck"),
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index 9023ca9caf84..b5cc6f66ae5d 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -240,7 +240,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
240 */ 240 */
241unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) 241unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
242{ 242{
243 long long dpll_clk; 243 u64 dpll_clk;
244 u32 dpll_mult, dpll_div, v; 244 u32 dpll_mult, dpll_div, v;
245 struct dpll_data *dd; 245 struct dpll_data *dd;
246 246
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
262 dpll_div = v & dd->div1_mask; 262 dpll_div = v & dd->div1_mask;
263 dpll_div >>= __ffs(dd->div1_mask); 263 dpll_div >>= __ffs(dd->div1_mask);
264 264
265 dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult; 265 dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
266 do_div(dpll_clk, dpll_div + 1); 266 do_div(dpll_clk, dpll_div + 1);
267 267
268 return dpll_clk; 268 return dpll_clk;
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 5b1726829e6d..df2558350fc1 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -214,7 +214,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
214{ 214{
215 struct clk_divider *divider; 215 struct clk_divider *divider;
216 unsigned int div, value; 216 unsigned int div, value;
217 unsigned long flags = 0;
218 u32 val; 217 u32 val;
219 218
220 if (!hw || !rate) 219 if (!hw || !rate)
@@ -228,9 +227,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
228 if (value > div_mask(divider)) 227 if (value > div_mask(divider))
229 value = div_mask(divider); 228 value = div_mask(divider);
230 229
231 if (divider->lock)
232 spin_lock_irqsave(divider->lock, flags);
233
234 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { 230 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
235 val = div_mask(divider) << (divider->shift + 16); 231 val = div_mask(divider) << (divider->shift + 16);
236 } else { 232 } else {
@@ -240,9 +236,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
240 val |= value << divider->shift; 236 val |= value << divider->shift;
241 ti_clk_ll_ops->clk_writel(val, divider->reg); 237 ti_clk_ll_ops->clk_writel(val, divider->reg);
242 238
243 if (divider->lock)
244 spin_unlock_irqrestore(divider->lock, flags);
245
246 return 0; 239 return 0;
247} 240}
248 241
@@ -256,8 +249,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
256 const char *parent_name, 249 const char *parent_name,
257 unsigned long flags, void __iomem *reg, 250 unsigned long flags, void __iomem *reg,
258 u8 shift, u8 width, u8 clk_divider_flags, 251 u8 shift, u8 width, u8 clk_divider_flags,
259 const struct clk_div_table *table, 252 const struct clk_div_table *table)
260 spinlock_t *lock)
261{ 253{
262 struct clk_divider *div; 254 struct clk_divider *div;
263 struct clk *clk; 255 struct clk *clk;
@@ -288,7 +280,6 @@ static struct clk *_register_divider(struct device *dev, const char *name,
288 div->shift = shift; 280 div->shift = shift;
289 div->width = width; 281 div->width = width;
290 div->flags = clk_divider_flags; 282 div->flags = clk_divider_flags;
291 div->lock = lock;
292 div->hw.init = &init; 283 div->hw.init = &init;
293 div->table = table; 284 div->table = table;
294 285
@@ -421,7 +412,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)
421 412
422 clk = _register_divider(NULL, setup->name, div->parent, 413 clk = _register_divider(NULL, setup->name, div->parent,
423 flags, (void __iomem *)reg, div->bit_shift, 414 flags, (void __iomem *)reg, div->bit_shift,
424 width, div_flags, table, NULL); 415 width, div_flags, table);
425 416
426 if (IS_ERR(clk)) 417 if (IS_ERR(clk))
427 kfree(table); 418 kfree(table);
@@ -584,8 +575,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
584 goto cleanup; 575 goto cleanup;
585 576
586 clk = _register_divider(NULL, node->name, parent_name, flags, reg, 577 clk = _register_divider(NULL, node->name, parent_name, flags, reg,
587 shift, width, clk_divider_flags, table, 578 shift, width, clk_divider_flags, table);
588 NULL);
589 579
590 if (!IS_ERR(clk)) { 580 if (!IS_ERR(clk)) {
591 of_clk_add_provider(node, of_clk_src_simple_get, clk); 581 of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index f4b2e9888bdf..66a0d0ed8b55 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -168,7 +168,7 @@ static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
168{ 168{
169 struct fapll_data *fd = to_fapll(hw); 169 struct fapll_data *fd = to_fapll(hw);
170 u32 fapll_n, fapll_p, v; 170 u32 fapll_n, fapll_p, v;
171 long long rate; 171 u64 rate;
172 172
173 if (ti_fapll_clock_is_bypass(fd)) 173 if (ti_fapll_clock_is_bypass(fd))
174 return parent_rate; 174 return parent_rate;
@@ -314,7 +314,7 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
314{ 314{
315 struct fapll_synth *synth = to_synth(hw); 315 struct fapll_synth *synth = to_synth(hw);
316 u32 synth_div_m; 316 u32 synth_div_m;
317 long long rate; 317 u64 rate;
318 318
319 /* The audio_pll_clk1 is hardwired to produce 32.768KiHz clock */ 319 /* The audio_pll_clk1 is hardwired to produce 32.768KiHz clock */
320 if (!synth->div) 320 if (!synth->div)
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 69f08a1d047d..dab9ba88b9d6 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -69,7 +69,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
69{ 69{
70 struct clk_mux *mux = to_clk_mux(hw); 70 struct clk_mux *mux = to_clk_mux(hw);
71 u32 val; 71 u32 val;
72 unsigned long flags = 0;
73 72
74 if (mux->table) { 73 if (mux->table) {
75 index = mux->table[index]; 74 index = mux->table[index];
@@ -81,9 +80,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
81 index++; 80 index++;
82 } 81 }
83 82
84 if (mux->lock)
85 spin_lock_irqsave(mux->lock, flags);
86
87 if (mux->flags & CLK_MUX_HIWORD_MASK) { 83 if (mux->flags & CLK_MUX_HIWORD_MASK) {
88 val = mux->mask << (mux->shift + 16); 84 val = mux->mask << (mux->shift + 16);
89 } else { 85 } else {
@@ -93,9 +89,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
93 val |= index << mux->shift; 89 val |= index << mux->shift;
94 ti_clk_ll_ops->clk_writel(val, mux->reg); 90 ti_clk_ll_ops->clk_writel(val, mux->reg);
95 91
96 if (mux->lock)
97 spin_unlock_irqrestore(mux->lock, flags);
98
99 return 0; 92 return 0;
100} 93}
101 94
@@ -109,7 +102,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
109 const char **parent_names, u8 num_parents, 102 const char **parent_names, u8 num_parents,
110 unsigned long flags, void __iomem *reg, 103 unsigned long flags, void __iomem *reg,
111 u8 shift, u32 mask, u8 clk_mux_flags, 104 u8 shift, u32 mask, u8 clk_mux_flags,
112 u32 *table, spinlock_t *lock) 105 u32 *table)
113{ 106{
114 struct clk_mux *mux; 107 struct clk_mux *mux;
115 struct clk *clk; 108 struct clk *clk;
@@ -133,7 +126,6 @@ static struct clk *_register_mux(struct device *dev, const char *name,
133 mux->shift = shift; 126 mux->shift = shift;
134 mux->mask = mask; 127 mux->mask = mask;
135 mux->flags = clk_mux_flags; 128 mux->flags = clk_mux_flags;
136 mux->lock = lock;
137 mux->table = table; 129 mux->table = table;
138 mux->hw.init = &init; 130 mux->hw.init = &init;
139 131
@@ -175,7 +167,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)
175 167
176 return _register_mux(NULL, setup->name, mux->parents, mux->num_parents, 168 return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
177 flags, (void __iomem *)reg, mux->bit_shift, mask, 169 flags, (void __iomem *)reg, mux->bit_shift, mask,
178 mux_flags, NULL, NULL); 170 mux_flags, NULL);
179} 171}
180 172
181/** 173/**
@@ -227,8 +219,7 @@ static void of_mux_clk_setup(struct device_node *node)
227 mask = (1 << fls(mask)) - 1; 219 mask = (1 << fls(mask)) - 1;
228 220
229 clk = _register_mux(NULL, node->name, parent_names, num_parents, 221 clk = _register_mux(NULL, node->name, parent_names, num_parents,
230 flags, reg, shift, mask, clk_mux_flags, NULL, 222 flags, reg, shift, mask, clk_mux_flags, NULL);
231 NULL);
232 223
233 if (!IS_ERR(clk)) 224 if (!IS_ERR(clk))
234 of_clk_add_provider(node, of_clk_src_simple_get, clk); 225 of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
index 1593ade2a815..c4f7d7a9b689 100644
--- a/drivers/clocksource/mmio.c
+++ b/drivers/clocksource/mmio.c
@@ -55,7 +55,7 @@ int __init clocksource_mmio_init(void __iomem *base, const char *name,
55{ 55{
56 struct clocksource_mmio *cs; 56 struct clocksource_mmio *cs;
57 57
58 if (bits > 32 || bits < 16) 58 if (bits > 64 || bits < 16)
59 return -EINVAL; 59 return -EINVAL;
60 60
61 cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL); 61 cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 235a1ba73d92..b1f8a73e5a94 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -226,7 +226,7 @@ config ARM_TEGRA20_CPUFREQ
226 226
227config ARM_TEGRA124_CPUFREQ 227config ARM_TEGRA124_CPUFREQ
228 tristate "Tegra124 CPUFreq support" 228 tristate "Tegra124 CPUFreq support"
229 depends on ARCH_TEGRA && CPUFREQ_DT 229 depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
230 default y 230 default y
231 help 231 help
232 This adds the CPUFreq driver support for Tegra124 SOCs. 232 This adds the CPUFreq driver support for Tegra124 SOCs.
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4d07cbd2b23c..98fb8821382d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1123,7 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1123 limits->max_sysfs_pct); 1123 limits->max_sysfs_pct);
1124 limits->max_perf_pct = max(limits->min_policy_pct, 1124 limits->max_perf_pct = max(limits->min_policy_pct,
1125 limits->max_perf_pct); 1125 limits->max_perf_pct);
1126 limits->max_perf = round_up(limits->max_perf, 8); 1126 limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
1127 1127
1128 /* Make sure min_perf_pct <= max_perf_pct */ 1128 /* Make sure min_perf_pct <= max_perf_pct */
1129 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 1129 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 733aa5153e74..68ef8fd9482f 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -648,7 +648,7 @@ late_initcall(s3c_cpufreq_initcall);
648 * 648 *
649 * Register the given set of PLLs with the system. 649 * Register the given set of PLLs with the system.
650 */ 650 */
651int __init s3c_plltab_register(struct cpufreq_frequency_table *plls, 651int s3c_plltab_register(struct cpufreq_frequency_table *plls,
652 unsigned int plls_no) 652 unsigned int plls_no)
653{ 653{
654 struct cpufreq_frequency_table *vals; 654 struct cpufreq_frequency_table *vals;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7f039de143f0..370c661c7d7b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -156,7 +156,7 @@
156#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ 156#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
157#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) 157#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
158#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) 158#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
159#define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */ 159#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */
160#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ 160#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
161#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ 161#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
162#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ 162#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
@@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
965 NULL, 965 NULL,
966 src_addr, dst_addr, 966 src_addr, dst_addr,
967 xt, xt->sgl); 967 xt, xt->sgl);
968 for (i = 0; i < xt->numf; i++) 968
969 /* Length of the block is (BLEN+1) microblocks. */
970 for (i = 0; i < xt->numf - 1; i++)
969 at_xdmac_increment_block_count(chan, first); 971 at_xdmac_increment_block_count(chan, first);
970 972
971 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", 973 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
@@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1086 /* Check remaining length and change data width if needed. */ 1088 /* Check remaining length and change data width if needed. */
1087 dwidth = at_xdmac_align_width(chan, 1089 dwidth = at_xdmac_align_width(chan,
1088 src_addr | dst_addr | xfer_size); 1090 src_addr | dst_addr | xfer_size);
1091 chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1089 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); 1092 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1090 1093
1091 ublen = xfer_size >> dwidth; 1094 ublen = xfer_size >> dwidth;
@@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1333 * since we don't care about the stride anymore. 1336 * since we don't care about the stride anymore.
1334 */ 1337 */
1335 if ((i == (sg_len - 1)) && 1338 if ((i == (sg_len - 1)) &&
1336 sg_dma_len(ppsg) == sg_dma_len(psg)) { 1339 sg_dma_len(psg) == sg_dma_len(sg)) {
1337 dev_dbg(chan2dev(chan), 1340 dev_dbg(chan2dev(chan),
1338 "%s: desc 0x%p can be merged with desc 0x%p\n", 1341 "%s: desc 0x%p can be merged with desc 0x%p\n",
1339 __func__, desc, pdesc); 1342 __func__, desc, pdesc);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index c92d6a70ccf3..996c4b00d323 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -31,6 +31,7 @@
31 */ 31 */
32#include <linux/dmaengine.h> 32#include <linux/dmaengine.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/dmapool.h>
34#include <linux/err.h> 35#include <linux/err.h>
35#include <linux/init.h> 36#include <linux/init.h>
36#include <linux/interrupt.h> 37#include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
62 uint32_t pad[2]; 63 uint32_t pad[2];
63}; 64};
64 65
66struct bcm2835_cb_entry {
67 struct bcm2835_dma_cb *cb;
68 dma_addr_t paddr;
69};
70
65struct bcm2835_chan { 71struct bcm2835_chan {
66 struct virt_dma_chan vc; 72 struct virt_dma_chan vc;
67 struct list_head node; 73 struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
72 78
73 int ch; 79 int ch;
74 struct bcm2835_desc *desc; 80 struct bcm2835_desc *desc;
81 struct dma_pool *cb_pool;
75 82
76 void __iomem *chan_base; 83 void __iomem *chan_base;
77 int irq_number; 84 int irq_number;
78}; 85};
79 86
80struct bcm2835_desc { 87struct bcm2835_desc {
88 struct bcm2835_chan *c;
81 struct virt_dma_desc vd; 89 struct virt_dma_desc vd;
82 enum dma_transfer_direction dir; 90 enum dma_transfer_direction dir;
83 91
84 unsigned int control_block_size; 92 struct bcm2835_cb_entry *cb_list;
85 struct bcm2835_dma_cb *control_block_base;
86 dma_addr_t control_block_base_phys;
87 93
88 unsigned int frames; 94 unsigned int frames;
89 size_t size; 95 size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
143static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) 149static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
144{ 150{
145 struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); 151 struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
146 dma_free_coherent(desc->vd.tx.chan->device->dev, 152 int i;
147 desc->control_block_size, 153
148 desc->control_block_base, 154 for (i = 0; i < desc->frames; i++)
149 desc->control_block_base_phys); 155 dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
156 desc->cb_list[i].paddr);
157
158 kfree(desc->cb_list);
150 kfree(desc); 159 kfree(desc);
151} 160}
152 161
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
199 208
200 c->desc = d = to_bcm2835_dma_desc(&vd->tx); 209 c->desc = d = to_bcm2835_dma_desc(&vd->tx);
201 210
202 writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); 211 writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
203 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); 212 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
204} 213}
205 214
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
232static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) 241static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
233{ 242{
234 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 243 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
244 struct device *dev = c->vc.chan.device->dev;
245
246 dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
235 247
236 dev_dbg(c->vc.chan.device->dev, 248 c->cb_pool = dma_pool_create(dev_name(dev), dev,
237 "Allocating DMA channel %d\n", c->ch); 249 sizeof(struct bcm2835_dma_cb), 0, 0);
250 if (!c->cb_pool) {
251 dev_err(dev, "unable to allocate descriptor pool\n");
252 return -ENOMEM;
253 }
238 254
239 return request_irq(c->irq_number, 255 return request_irq(c->irq_number,
240 bcm2835_dma_callback, 0, "DMA IRQ", c); 256 bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
246 262
247 vchan_free_chan_resources(&c->vc); 263 vchan_free_chan_resources(&c->vc);
248 free_irq(c->irq_number, c); 264 free_irq(c->irq_number, c);
265 dma_pool_destroy(c->cb_pool);
249 266
250 dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); 267 dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
251} 268}
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
261 size_t size; 278 size_t size;
262 279
263 for (size = i = 0; i < d->frames; i++) { 280 for (size = i = 0; i < d->frames; i++) {
264 struct bcm2835_dma_cb *control_block = 281 struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
265 &d->control_block_base[i];
266 size_t this_size = control_block->length; 282 size_t this_size = control_block->length;
267 dma_addr_t dma; 283 dma_addr_t dma;
268 284
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
343 dma_addr_t dev_addr; 359 dma_addr_t dev_addr;
344 unsigned int es, sync_type; 360 unsigned int es, sync_type;
345 unsigned int frame; 361 unsigned int frame;
362 int i;
346 363
347 /* Grab configuration */ 364 /* Grab configuration */
348 if (!is_slave_direction(direction)) { 365 if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
374 if (!d) 391 if (!d)
375 return NULL; 392 return NULL;
376 393
394 d->c = c;
377 d->dir = direction; 395 d->dir = direction;
378 d->frames = buf_len / period_len; 396 d->frames = buf_len / period_len;
379 397
380 /* Allocate memory for control blocks */ 398 d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
381 d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); 399 if (!d->cb_list) {
382 d->control_block_base = dma_zalloc_coherent(chan->device->dev,
383 d->control_block_size, &d->control_block_base_phys,
384 GFP_NOWAIT);
385
386 if (!d->control_block_base) {
387 kfree(d); 400 kfree(d);
388 return NULL; 401 return NULL;
389 } 402 }
403 /* Allocate memory for control blocks */
404 for (i = 0; i < d->frames; i++) {
405 struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
406
407 cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
408 &cb_entry->paddr);
409 if (!cb_entry->cb)
410 goto error_cb;
411 }
390 412
391 /* 413 /*
392 * Iterate over all frames, create a control block 414 * Iterate over all frames, create a control block
393 * for each frame and link them together. 415 * for each frame and link them together.
394 */ 416 */
395 for (frame = 0; frame < d->frames; frame++) { 417 for (frame = 0; frame < d->frames; frame++) {
396 struct bcm2835_dma_cb *control_block = 418 struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
397 &d->control_block_base[frame];
398 419
399 /* Setup adresses */ 420 /* Setup adresses */
400 if (d->dir == DMA_DEV_TO_MEM) { 421 if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
428 * This DMA engine driver currently only supports cyclic DMA. 449 * This DMA engine driver currently only supports cyclic DMA.
429 * Therefore, wrap around at number of frames. 450 * Therefore, wrap around at number of frames.
430 */ 451 */
431 control_block->next = d->control_block_base_phys + 452 control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
432 sizeof(struct bcm2835_dma_cb)
433 * ((frame + 1) % d->frames);
434 } 453 }
435 454
436 return vchan_tx_prep(&c->vc, &d->vd, flags); 455 return vchan_tx_prep(&c->vc, &d->vd, flags);
456error_cb:
457 i--;
458 for (; i >= 0; i--) {
459 struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
460
461 dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
462 }
463
464 kfree(d->cb_list);
465 kfree(d);
466 return NULL;
437} 467}
438 468
439static int bcm2835_dma_slave_config(struct dma_chan *chan, 469static int bcm2835_dma_slave_config(struct dma_chan *chan,
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 0675e268d577..16fe773fb846 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
1752 return ret; 1752 return ret;
1753} 1753}
1754 1754
1755static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) 1755static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
1756{ 1756{
1757 s16 *memcpy_ch = memcpy_channels;
1758
1759 if (!memcpy_channels) 1757 if (!memcpy_channels)
1760 return false; 1758 return false;
1761 while (*memcpy_ch != -1) { 1759 while (*memcpy_channels != -1) {
1762 if (*memcpy_ch == ch_num) 1760 if (*memcpy_channels == ch_num)
1763 return true; 1761 return true;
1764 memcpy_ch++; 1762 memcpy_channels++;
1765 } 1763 }
1766 return false; 1764 return false;
1767} 1765}
@@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
1775{ 1773{
1776 struct dma_device *s_ddev = &ecc->dma_slave; 1774 struct dma_device *s_ddev = &ecc->dma_slave;
1777 struct dma_device *m_ddev = NULL; 1775 struct dma_device *m_ddev = NULL;
1778 s16 *memcpy_channels = ecc->info->memcpy_channels; 1776 s32 *memcpy_channels = ecc->info->memcpy_channels;
1779 int i, j; 1777 int i, j;
1780 1778
1781 dma_cap_zero(s_ddev->cap_mask); 1779 dma_cap_zero(s_ddev->cap_mask);
@@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
1996 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); 1994 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
1997 if (prop) { 1995 if (prop) {
1998 const char pname[] = "ti,edma-memcpy-channels"; 1996 const char pname[] = "ti,edma-memcpy-channels";
1999 size_t nelm = sz / sizeof(s16); 1997 size_t nelm = sz / sizeof(s32);
2000 s16 *memcpy_ch; 1998 s32 *memcpy_ch;
2001 1999
2002 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), 2000 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
2003 GFP_KERNEL); 2001 GFP_KERNEL);
2004 if (!memcpy_ch) 2002 if (!memcpy_ch)
2005 return ERR_PTR(-ENOMEM); 2003 return ERR_PTR(-ENOMEM);
2006 2004
2007 ret = of_property_read_u16_array(dev->of_node, pname, 2005 ret = of_property_read_u32_array(dev->of_node, pname,
2008 (u16 *)memcpy_ch, nelm); 2006 (u32 *)memcpy_ch, nelm);
2009 if (ret) 2007 if (ret)
2010 return ERR_PTR(ret); 2008 return ERR_PTR(ret);
2011 2009
@@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2017 &sz); 2015 &sz);
2018 if (prop) { 2016 if (prop) {
2019 const char pname[] = "ti,edma-reserved-slot-ranges"; 2017 const char pname[] = "ti,edma-reserved-slot-ranges";
2018 u32 (*tmp)[2];
2020 s16 (*rsv_slots)[2]; 2019 s16 (*rsv_slots)[2];
2021 size_t nelm = sz / sizeof(*rsv_slots); 2020 size_t nelm = sz / sizeof(*tmp);
2022 struct edma_rsv_info *rsv_info; 2021 struct edma_rsv_info *rsv_info;
2022 int i;
2023 2023
2024 if (!nelm) 2024 if (!nelm)
2025 return info; 2025 return info;
2026 2026
2027 tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
2028 if (!tmp)
2029 return ERR_PTR(-ENOMEM);
2030
2027 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); 2031 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
2028 if (!rsv_info) 2032 if (!rsv_info) {
2033 kfree(tmp);
2029 return ERR_PTR(-ENOMEM); 2034 return ERR_PTR(-ENOMEM);
2035 }
2030 2036
2031 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), 2037 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
2032 GFP_KERNEL); 2038 GFP_KERNEL);
2033 if (!rsv_slots) 2039 if (!rsv_slots) {
2040 kfree(tmp);
2034 return ERR_PTR(-ENOMEM); 2041 return ERR_PTR(-ENOMEM);
2042 }
2035 2043
2036 ret = of_property_read_u16_array(dev->of_node, pname, 2044 ret = of_property_read_u32_array(dev->of_node, pname,
2037 (u16 *)rsv_slots, nelm * 2); 2045 (u32 *)tmp, nelm * 2);
2038 if (ret) 2046 if (ret) {
2047 kfree(tmp);
2039 return ERR_PTR(ret); 2048 return ERR_PTR(ret);
2049 }
2040 2050
2051 for (i = 0; i < nelm; i++) {
2052 rsv_slots[i][0] = tmp[i][0];
2053 rsv_slots[i][1] = tmp[i][1];
2054 }
2041 rsv_slots[nelm][0] = -1; 2055 rsv_slots[nelm][0] = -1;
2042 rsv_slots[nelm][1] = -1; 2056 rsv_slots[nelm][1] = -1;
2057
2043 info->rsv = rsv_info; 2058 info->rsv = rsv_info;
2044 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; 2059 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
2060
2061 kfree(tmp);
2045 } 2062 }
2046 2063
2047 return info; 2064 return info;
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..cddfa8dbf4bd 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -317,6 +317,7 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
317 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); 317 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
318 struct device *dev = mic_dma_ch_to_device(mic_ch); 318 struct device *dev = mic_dma_ch_to_device(mic_ch);
319 int result; 319 int result;
320 struct dma_async_tx_descriptor *tx = NULL;
320 321
321 if (!len && !flags) 322 if (!len && !flags)
322 return NULL; 323 return NULL;
@@ -324,10 +325,13 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
324 spin_lock(&mic_ch->prep_lock); 325 spin_lock(&mic_ch->prep_lock);
325 result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len); 326 result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
326 if (result >= 0) 327 if (result >= 0)
327 return allocate_tx(mic_ch); 328 tx = allocate_tx(mic_ch);
328 dev_err(dev, "Error enqueueing dma, error=%d\n", result); 329
330 if (!tx)
331 dev_err(dev, "Error enqueueing dma, error=%d\n", result);
332
329 spin_unlock(&mic_ch->prep_lock); 333 spin_unlock(&mic_ch->prep_lock);
330 return NULL; 334 return tx;
331} 335}
332 336
333static struct dma_async_tx_descriptor * 337static struct dma_async_tx_descriptor *
@@ -335,13 +339,14 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
335{ 339{
336 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); 340 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
337 int ret; 341 int ret;
342 struct dma_async_tx_descriptor *tx = NULL;
338 343
339 spin_lock(&mic_ch->prep_lock); 344 spin_lock(&mic_ch->prep_lock);
340 ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0); 345 ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
341 if (!ret) 346 if (!ret)
342 return allocate_tx(mic_ch); 347 tx = allocate_tx(mic_ch);
343 spin_unlock(&mic_ch->prep_lock); 348 spin_unlock(&mic_ch->prep_lock);
344 return NULL; 349 return tx;
345} 350}
346 351
347/* Return the status of the transaction */ 352/* Return the status of the transaction */
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a24f5cb877e0..953dc9195937 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -122,12 +122,10 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
122 } 122 }
123 123
124 ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size); 124 ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
125 if (ret)
126 return ret;
127 125
128 release_firmware(fw); 126 release_firmware(fw);
129 127
130 return 0; 128 return ret;
131} 129}
132EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load); 130EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);
133 131
@@ -256,7 +254,6 @@ int fpga_mgr_register(struct device *dev, const char *name,
256 void *priv) 254 void *priv)
257{ 255{
258 struct fpga_manager *mgr; 256 struct fpga_manager *mgr;
259 const char *dt_label;
260 int id, ret; 257 int id, ret;
261 258
262 if (!mops || !mops->write_init || !mops->write || 259 if (!mops || !mops->write_init || !mops->write ||
@@ -300,11 +297,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
300 mgr->dev.id = id; 297 mgr->dev.id = id;
301 dev_set_drvdata(dev, mgr); 298 dev_set_drvdata(dev, mgr);
302 299
303 dt_label = of_get_property(mgr->dev.of_node, "label", NULL); 300 ret = dev_set_name(&mgr->dev, "fpga%d", id);
304 if (dt_label) 301 if (ret)
305 ret = dev_set_name(&mgr->dev, "%s", dt_label); 302 goto error_device;
306 else
307 ret = dev_set_name(&mgr->dev, "fpga%d", id);
308 303
309 ret = device_add(&mgr->dev); 304 ret = device_add(&mgr->dev);
310 if (ret) 305 if (ret)
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index e5827a56ff3b..5eaea8b812cf 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -113,7 +113,7 @@ static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
113 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR); 113 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
114 114
115 __raw_writel( 115 __raw_writel(
116 __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset), 116 __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
117 ctrl->base + AR71XX_GPIO_REG_OE); 117 ctrl->base + AR71XX_GPIO_REG_OE);
118 118
119 spin_unlock_irqrestore(&ctrl->lock, flags); 119 spin_unlock_irqrestore(&ctrl->lock, flags);
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index bd5193c67a9c..88ae70ddb127 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -141,9 +141,9 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
141 unsigned long pinmask = bgc->pin2mask(bgc, gpio); 141 unsigned long pinmask = bgc->pin2mask(bgc, gpio);
142 142
143 if (bgc->dir & pinmask) 143 if (bgc->dir & pinmask)
144 return bgc->read_reg(bgc->reg_set) & pinmask; 144 return !!(bgc->read_reg(bgc->reg_set) & pinmask);
145 else 145 else
146 return bgc->read_reg(bgc->reg_dat) & pinmask; 146 return !!(bgc->read_reg(bgc->reg_dat) & pinmask);
147} 147}
148 148
149static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) 149static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 2a91f3287e3b..4e4c3083ae56 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1279,7 +1279,13 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
1279 chip = desc->chip; 1279 chip = desc->chip;
1280 offset = gpio_chip_hwgpio(desc); 1280 offset = gpio_chip_hwgpio(desc);
1281 value = chip->get ? chip->get(chip, offset) : -EIO; 1281 value = chip->get ? chip->get(chip, offset) : -EIO;
1282 value = value < 0 ? value : !!value; 1282 /*
1283 * FIXME: fix all drivers to clamp to [0,1] or return negative,
1284 * then change this to:
1285 * value = value < 0 ? value : !!value;
1286 * so we can properly propagate error codes.
1287 */
1288 value = !!value;
1283 trace_gpio_value(desc_to_gpio(desc), 1, value); 1289 trace_gpio_value(desc_to_gpio(desc), 1, value);
1284 return value; 1290 return value;
1285} 1291}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c4bf9a1cf4a6..59babd5a5396 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -160,6 +160,7 @@ config DRM_AMDGPU
160 If M is selected, the module will be called amdgpu. 160 If M is selected, the module will be called amdgpu.
161 161
162source "drivers/gpu/drm/amd/amdgpu/Kconfig" 162source "drivers/gpu/drm/amd/amdgpu/Kconfig"
163source "drivers/gpu/drm/amd/powerplay/Kconfig"
163 164
164source "drivers/gpu/drm/nouveau/Kconfig" 165source "drivers/gpu/drm/nouveau/Kconfig"
165 166
@@ -266,3 +267,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
266source "drivers/gpu/drm/imx/Kconfig" 267source "drivers/gpu/drm/imx/Kconfig"
267 268
268source "drivers/gpu/drm/vc4/Kconfig" 269source "drivers/gpu/drm/vc4/Kconfig"
270
271source "drivers/gpu/drm/etnaviv/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1e9ff4c3e3db..f858aa25fbb2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -75,3 +75,4 @@ obj-y += i2c/
75obj-y += panel/ 75obj-y += panel/
76obj-y += bridge/ 76obj-y += bridge/
77obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ 77obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
78obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 04c270757030..66f729eaf00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -2,10 +2,13 @@
2# Makefile for the drm device driver. This driver provides support for the 2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \ 5FULL_AMD_PATH=$(src)/..
6 -Idrivers/gpu/drm/amd/include \ 6
7 -Idrivers/gpu/drm/amd/amdgpu \ 7ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
8 -Idrivers/gpu/drm/amd/scheduler 8 -I$(FULL_AMD_PATH)/include \
9 -I$(FULL_AMD_PATH)/amdgpu \
10 -I$(FULL_AMD_PATH)/scheduler \
11 -I$(FULL_AMD_PATH)/powerplay/inc
9 12
10amdgpu-y := amdgpu_drv.o 13amdgpu-y := amdgpu_drv.o
11 14
@@ -44,6 +47,7 @@ amdgpu-y += \
44# add SMC block 47# add SMC block
45amdgpu-y += \ 48amdgpu-y += \
46 amdgpu_dpm.o \ 49 amdgpu_dpm.o \
50 amdgpu_powerplay.o \
47 cz_smc.o cz_dpm.o \ 51 cz_smc.o cz_dpm.o \
48 tonga_smc.o tonga_dpm.o \ 52 tonga_smc.o tonga_dpm.o \
49 fiji_smc.o fiji_dpm.o \ 53 fiji_smc.o fiji_dpm.o \
@@ -94,6 +98,14 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
94amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o 98amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
95amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o 99amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
96 100
101ifneq ($(CONFIG_DRM_AMD_POWERPLAY),)
102
103include $(FULL_AMD_PATH)/powerplay/Makefile
104
105amdgpu-y += $(AMD_POWERPLAY_FILES)
106
107endif
108
97obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o 109obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
98 110
99CFLAGS_amdgpu_trace_points.o := -I$(src) 111CFLAGS_amdgpu_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5a5f04d0902d..313b0cc8d676 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -52,6 +52,7 @@
52#include "amdgpu_irq.h" 52#include "amdgpu_irq.h"
53#include "amdgpu_ucode.h" 53#include "amdgpu_ucode.h"
54#include "amdgpu_gds.h" 54#include "amdgpu_gds.h"
55#include "amd_powerplay.h"
55 56
56#include "gpu_scheduler.h" 57#include "gpu_scheduler.h"
57 58
@@ -85,6 +86,7 @@ extern int amdgpu_enable_scheduler;
85extern int amdgpu_sched_jobs; 86extern int amdgpu_sched_jobs;
86extern int amdgpu_sched_hw_submission; 87extern int amdgpu_sched_hw_submission;
87extern int amdgpu_enable_semaphores; 88extern int amdgpu_enable_semaphores;
89extern int amdgpu_powerplay;
88 90
89#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 91#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
90#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 92#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -918,8 +920,8 @@ struct amdgpu_ring {
918#define AMDGPU_VM_FAULT_STOP_ALWAYS 2 920#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
919 921
920struct amdgpu_vm_pt { 922struct amdgpu_vm_pt {
921 struct amdgpu_bo *bo; 923 struct amdgpu_bo_list_entry entry;
922 uint64_t addr; 924 uint64_t addr;
923}; 925};
924 926
925struct amdgpu_vm_id { 927struct amdgpu_vm_id {
@@ -981,9 +983,12 @@ struct amdgpu_vm_manager {
981void amdgpu_vm_manager_fini(struct amdgpu_device *adev); 983void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
982int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); 984int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
983void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 985void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
984struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, 986void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
985 struct amdgpu_vm *vm, 987 struct list_head *validated,
986 struct list_head *head); 988 struct amdgpu_bo_list_entry *entry);
989void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
990void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
991 struct amdgpu_vm *vm);
987int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 992int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
988 struct amdgpu_sync *sync); 993 struct amdgpu_sync *sync);
989void amdgpu_vm_flush(struct amdgpu_ring *ring, 994void amdgpu_vm_flush(struct amdgpu_ring *ring,
@@ -1024,11 +1029,9 @@ int amdgpu_vm_free_job(struct amdgpu_job *job);
1024 * context related structures 1029 * context related structures
1025 */ 1030 */
1026 1031
1027#define AMDGPU_CTX_MAX_CS_PENDING 16
1028
1029struct amdgpu_ctx_ring { 1032struct amdgpu_ctx_ring {
1030 uint64_t sequence; 1033 uint64_t sequence;
1031 struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING]; 1034 struct fence **fences;
1032 struct amd_sched_entity entity; 1035 struct amd_sched_entity entity;
1033}; 1036};
1034 1037
@@ -1037,6 +1040,7 @@ struct amdgpu_ctx {
1037 struct amdgpu_device *adev; 1040 struct amdgpu_device *adev;
1038 unsigned reset_counter; 1041 unsigned reset_counter;
1039 spinlock_t ring_lock; 1042 spinlock_t ring_lock;
1043 struct fence **fences;
1040 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; 1044 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
1041}; 1045};
1042 1046
@@ -1047,7 +1051,7 @@ struct amdgpu_ctx_mgr {
1047 struct idr ctx_handles; 1051 struct idr ctx_handles;
1048}; 1052};
1049 1053
1050int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, 1054int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
1051 struct amdgpu_ctx *ctx); 1055 struct amdgpu_ctx *ctx);
1052void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); 1056void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
1053 1057
@@ -1254,7 +1258,7 @@ struct amdgpu_cs_parser {
1254 unsigned nchunks; 1258 unsigned nchunks;
1255 struct amdgpu_cs_chunk *chunks; 1259 struct amdgpu_cs_chunk *chunks;
1256 /* relocations */ 1260 /* relocations */
1257 struct amdgpu_bo_list_entry *vm_bos; 1261 struct amdgpu_bo_list_entry vm_pd;
1258 struct list_head validated; 1262 struct list_head validated;
1259 struct fence *fence; 1263 struct fence *fence;
1260 1264
@@ -1264,7 +1268,8 @@ struct amdgpu_cs_parser {
1264 struct ww_acquire_ctx ticket; 1268 struct ww_acquire_ctx ticket;
1265 1269
1266 /* user fence */ 1270 /* user fence */
1267 struct amdgpu_user_fence uf; 1271 struct amdgpu_user_fence uf;
1272 struct amdgpu_bo_list_entry uf_entry;
1268}; 1273};
1269 1274
1270struct amdgpu_job { 1275struct amdgpu_job {
@@ -1300,31 +1305,7 @@ struct amdgpu_wb {
1300int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); 1305int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
1301void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); 1306void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
1302 1307
1303/**
1304 * struct amdgpu_pm - power management datas
1305 * It keeps track of various data needed to take powermanagement decision.
1306 */
1307 1308
1308enum amdgpu_pm_state_type {
1309 /* not used for dpm */
1310 POWER_STATE_TYPE_DEFAULT,
1311 POWER_STATE_TYPE_POWERSAVE,
1312 /* user selectable states */
1313 POWER_STATE_TYPE_BATTERY,
1314 POWER_STATE_TYPE_BALANCED,
1315 POWER_STATE_TYPE_PERFORMANCE,
1316 /* internal states */
1317 POWER_STATE_TYPE_INTERNAL_UVD,
1318 POWER_STATE_TYPE_INTERNAL_UVD_SD,
1319 POWER_STATE_TYPE_INTERNAL_UVD_HD,
1320 POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1321 POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1322 POWER_STATE_TYPE_INTERNAL_BOOT,
1323 POWER_STATE_TYPE_INTERNAL_THERMAL,
1324 POWER_STATE_TYPE_INTERNAL_ACPI,
1325 POWER_STATE_TYPE_INTERNAL_ULV,
1326 POWER_STATE_TYPE_INTERNAL_3DPERF,
1327};
1328 1309
1329enum amdgpu_int_thermal_type { 1310enum amdgpu_int_thermal_type {
1330 THERMAL_TYPE_NONE, 1311 THERMAL_TYPE_NONE,
@@ -1606,8 +1587,8 @@ struct amdgpu_dpm {
1606 /* vce requirements */ 1587 /* vce requirements */
1607 struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS]; 1588 struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
1608 enum amdgpu_vce_level vce_level; 1589 enum amdgpu_vce_level vce_level;
1609 enum amdgpu_pm_state_type state; 1590 enum amd_pm_state_type state;
1610 enum amdgpu_pm_state_type user_state; 1591 enum amd_pm_state_type user_state;
1611 u32 platform_caps; 1592 u32 platform_caps;
1612 u32 voltage_response_time; 1593 u32 voltage_response_time;
1613 u32 backbias_response_time; 1594 u32 backbias_response_time;
@@ -1660,8 +1641,13 @@ struct amdgpu_pm {
1660 const struct firmware *fw; /* SMC firmware */ 1641 const struct firmware *fw; /* SMC firmware */
1661 uint32_t fw_version; 1642 uint32_t fw_version;
1662 const struct amdgpu_dpm_funcs *funcs; 1643 const struct amdgpu_dpm_funcs *funcs;
1644 uint32_t pcie_gen_mask;
1645 uint32_t pcie_mlw_mask;
1646 struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
1663}; 1647};
1664 1648
1649void amdgpu_get_pcie_info(struct amdgpu_device *adev);
1650
1665/* 1651/*
1666 * UVD 1652 * UVD
1667 */ 1653 */
@@ -1829,6 +1815,8 @@ struct amdgpu_cu_info {
1829 */ 1815 */
1830struct amdgpu_asic_funcs { 1816struct amdgpu_asic_funcs {
1831 bool (*read_disabled_bios)(struct amdgpu_device *adev); 1817 bool (*read_disabled_bios)(struct amdgpu_device *adev);
1818 bool (*read_bios_from_rom)(struct amdgpu_device *adev,
1819 u8 *bios, u32 length_bytes);
1832 int (*read_register)(struct amdgpu_device *adev, u32 se_num, 1820 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
1833 u32 sh_num, u32 reg_offset, u32 *value); 1821 u32 sh_num, u32 reg_offset, u32 *value);
1834 void (*set_vga_state)(struct amdgpu_device *adev, bool state); 1822 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -2059,6 +2047,10 @@ struct amdgpu_device {
2059 /* interrupts */ 2047 /* interrupts */
2060 struct amdgpu_irq irq; 2048 struct amdgpu_irq irq;
2061 2049
2050 /* powerplay */
2051 struct amd_powerplay powerplay;
2052 bool pp_enabled;
2053
2062 /* dpm */ 2054 /* dpm */
2063 struct amdgpu_pm pm; 2055 struct amdgpu_pm pm;
2064 u32 cg_flags; 2056 u32 cg_flags;
@@ -2235,6 +2227,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2235#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2227#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2236#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2228#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2237#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2229#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2230#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
2238#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) 2231#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
2239#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info)) 2232#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
2240#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) 2233#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
@@ -2276,24 +2269,78 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2276#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) 2269#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
2277#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) 2270#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
2278#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) 2271#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
2279#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
2280#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) 2272#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
2281#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) 2273#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
2282#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) 2274#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
2283#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) 2275#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
2284#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
2285#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
2286#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) 2276#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
2287#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
2288#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
2289#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) 2277#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
2290#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
2291#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
2292#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) 2278#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
2293#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m)) 2279
2294#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev)) 2280#define amdgpu_dpm_get_temperature(adev) \
2295#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) 2281 (adev)->pp_enabled ? \
2296#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) 2282 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
2283 (adev)->pm.funcs->get_temperature((adev))
2284
2285#define amdgpu_dpm_set_fan_control_mode(adev, m) \
2286 (adev)->pp_enabled ? \
2287 (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
2288 (adev)->pm.funcs->set_fan_control_mode((adev), (m))
2289
2290#define amdgpu_dpm_get_fan_control_mode(adev) \
2291 (adev)->pp_enabled ? \
2292 (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
2293 (adev)->pm.funcs->get_fan_control_mode((adev))
2294
2295#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
2296 (adev)->pp_enabled ? \
2297 (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
2298 (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
2299
2300#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
2301 (adev)->pp_enabled ? \
2302 (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
2303 (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
2304
2305#define amdgpu_dpm_get_sclk(adev, l) \
2306 (adev)->pp_enabled ? \
2307 (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
2308 (adev)->pm.funcs->get_sclk((adev), (l))
2309
2310#define amdgpu_dpm_get_mclk(adev, l) \
2311 (adev)->pp_enabled ? \
2312 (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
2313 (adev)->pm.funcs->get_mclk((adev), (l))
2314
2315
2316#define amdgpu_dpm_force_performance_level(adev, l) \
2317 (adev)->pp_enabled ? \
2318 (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
2319 (adev)->pm.funcs->force_performance_level((adev), (l))
2320
2321#define amdgpu_dpm_powergate_uvd(adev, g) \
2322 (adev)->pp_enabled ? \
2323 (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
2324 (adev)->pm.funcs->powergate_uvd((adev), (g))
2325
2326#define amdgpu_dpm_powergate_vce(adev, g) \
2327 (adev)->pp_enabled ? \
2328 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
2329 (adev)->pm.funcs->powergate_vce((adev), (g))
2330
2331#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
2332 (adev)->pp_enabled ? \
2333 (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
2334 (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
2335
2336#define amdgpu_dpm_get_current_power_state(adev) \
2337 (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
2338
2339#define amdgpu_dpm_get_performance_level(adev) \
2340 (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
2341
2342#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
2343 (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
2297 2344
2298#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) 2345#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
2299 2346
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index a142d5ae148d..5cd7b736a9de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -29,66 +29,10 @@
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
31#include "amdgpu.h" 31#include "amdgpu.h"
32#include "amdgpu_acpi.h" 32#include "amd_acpi.h"
33#include "atom.h" 33#include "atom.h"
34 34
35#define ACPI_AC_CLASS "ac_adapter"
36
37extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); 35extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
38
39struct atif_verify_interface {
40 u16 size; /* structure size in bytes (includes size field) */
41 u16 version; /* version */
42 u32 notification_mask; /* supported notifications mask */
43 u32 function_bits; /* supported functions bit vector */
44} __packed;
45
46struct atif_system_params {
47 u16 size; /* structure size in bytes (includes size field) */
48 u32 valid_mask; /* valid flags mask */
49 u32 flags; /* flags */
50 u8 command_code; /* notify command code */
51} __packed;
52
53struct atif_sbios_requests {
54 u16 size; /* structure size in bytes (includes size field) */
55 u32 pending; /* pending sbios requests */
56 u8 panel_exp_mode; /* panel expansion mode */
57 u8 thermal_gfx; /* thermal state: target gfx controller */
58 u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
59 u8 forced_power_gfx; /* forced power state: target gfx controller */
60 u8 forced_power_state; /* forced power state: state id */
61 u8 system_power_src; /* system power source */
62 u8 backlight_level; /* panel backlight level (0-255) */
63} __packed;
64
65#define ATIF_NOTIFY_MASK 0x3
66#define ATIF_NOTIFY_NONE 0
67#define ATIF_NOTIFY_81 1
68#define ATIF_NOTIFY_N 2
69
70struct atcs_verify_interface {
71 u16 size; /* structure size in bytes (includes size field) */
72 u16 version; /* version */
73 u32 function_bits; /* supported functions bit vector */
74} __packed;
75
76#define ATCS_VALID_FLAGS_MASK 0x3
77
78struct atcs_pref_req_input {
79 u16 size; /* structure size in bytes (includes size field) */
80 u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
81 u16 valid_flags_mask; /* valid flags mask */
82 u16 flags; /* flags */
83 u8 req_type; /* request type */
84 u8 perf_req; /* performance request */
85} __packed;
86
87struct atcs_pref_req_output {
88 u16 size; /* structure size in bytes (includes size field) */
89 u8 ret_val; /* return value */
90} __packed;
91
92/* Call the ATIF method 36/* Call the ATIF method
93 */ 37 */
94/** 38/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 5a8fbadbd27b..3c895863fcf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -11,7 +11,7 @@
11#include <linux/acpi.h> 11#include <linux/acpi.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13 13
14#include "amdgpu_acpi.h" 14#include "amd_acpi.h"
15 15
16struct amdgpu_atpx_functions { 16struct amdgpu_atpx_functions {
17 bool px_params; 17 bool px_params;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index c44c0c6afd1b..80add22375ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -35,6 +35,13 @@
35 * BIOS. 35 * BIOS.
36 */ 36 */
37 37
38#define AMD_VBIOS_SIGNATURE " 761295520"
39#define AMD_VBIOS_SIGNATURE_OFFSET 0x30
40#define AMD_VBIOS_SIGNATURE_SIZE sizeof(AMD_VBIOS_SIGNATURE)
41#define AMD_VBIOS_SIGNATURE_END (AMD_VBIOS_SIGNATURE_OFFSET + AMD_VBIOS_SIGNATURE_SIZE)
42#define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA)
43#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9)
44
38/* If you boot an IGP board with a discrete card as the primary, 45/* If you boot an IGP board with a discrete card as the primary,
39 * the IGP rom is not accessible via the rom bar as the IGP rom is 46 * the IGP rom is not accessible via the rom bar as the IGP rom is
40 * part of the system bios. On boot, the system bios puts a 47 * part of the system bios. On boot, the system bios puts a
@@ -58,7 +65,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
58 return false; 65 return false;
59 } 66 }
60 67
61 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { 68 if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
62 iounmap(bios); 69 iounmap(bios);
63 return false; 70 return false;
64 } 71 }
@@ -74,7 +81,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
74 81
75bool amdgpu_read_bios(struct amdgpu_device *adev) 82bool amdgpu_read_bios(struct amdgpu_device *adev)
76{ 83{
77 uint8_t __iomem *bios, val1, val2; 84 uint8_t __iomem *bios, val[2];
78 size_t size; 85 size_t size;
79 86
80 adev->bios = NULL; 87 adev->bios = NULL;
@@ -84,10 +91,10 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
84 return false; 91 return false;
85 } 92 }
86 93
87 val1 = readb(&bios[0]); 94 val[0] = readb(&bios[0]);
88 val2 = readb(&bios[1]); 95 val[1] = readb(&bios[1]);
89 96
90 if (size == 0 || val1 != 0x55 || val2 != 0xaa) { 97 if (size == 0 || !AMD_IS_VALID_VBIOS(val)) {
91 pci_unmap_rom(adev->pdev, bios); 98 pci_unmap_rom(adev->pdev, bios);
92 return false; 99 return false;
93 } 100 }
@@ -101,6 +108,38 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
101 return true; 108 return true;
102} 109}
103 110
111static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
112{
113 u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
114 int len;
115
116 if (!adev->asic_funcs->read_bios_from_rom)
117 return false;
118
119 /* validate VBIOS signature */
120 if (amdgpu_asic_read_bios_from_rom(adev, &header[0], sizeof(header)) == false)
121 return false;
122 header[AMD_VBIOS_SIGNATURE_END] = 0;
123
124 if ((!AMD_IS_VALID_VBIOS(header)) ||
125 0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
126 AMD_VBIOS_SIGNATURE,
127 strlen(AMD_VBIOS_SIGNATURE)))
128 return false;
129
130 /* valid vbios, go on */
131 len = AMD_VBIOS_LENGTH(header);
132 len = ALIGN(len, 4);
133 adev->bios = kmalloc(len, GFP_KERNEL);
134 if (!adev->bios) {
135 DRM_ERROR("no memory to allocate for BIOS\n");
136 return false;
137 }
138
139 /* read complete BIOS */
140 return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
141}
142
104static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) 143static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
105{ 144{
106 uint8_t __iomem *bios; 145 uint8_t __iomem *bios;
@@ -113,7 +152,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
113 return false; 152 return false;
114 } 153 }
115 154
116 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { 155 if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
117 return false; 156 return false;
118 } 157 }
119 adev->bios = kmemdup(bios, size, GFP_KERNEL); 158 adev->bios = kmemdup(bios, size, GFP_KERNEL);
@@ -230,7 +269,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
230 break; 269 break;
231 } 270 }
232 271
233 if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) { 272 if (i == 0 || !AMD_IS_VALID_VBIOS(adev->bios)) {
234 kfree(adev->bios); 273 kfree(adev->bios);
235 return false; 274 return false;
236 } 275 }
@@ -320,6 +359,9 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
320 if (r == false) 359 if (r == false)
321 r = amdgpu_read_bios(adev); 360 r = amdgpu_read_bios(adev);
322 if (r == false) { 361 if (r == false) {
362 r = amdgpu_read_bios_from_rom(adev);
363 }
364 if (r == false) {
323 r = amdgpu_read_disabled_bios(adev); 365 r = amdgpu_read_disabled_bios(adev);
324 } 366 }
325 if (r == false) { 367 if (r == false) {
@@ -330,7 +372,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
330 adev->bios = NULL; 372 adev->bios = NULL;
331 return false; 373 return false;
332 } 374 }
333 if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) { 375 if (!AMD_IS_VALID_VBIOS(adev->bios)) {
334 printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]); 376 printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
335 goto free_bios; 377 goto free_bios;
336 } 378 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8e995148f56e..a081dda9fa2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -24,6 +24,7 @@
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/acpi.h>
27#include <drm/drmP.h> 28#include <drm/drmP.h>
28#include <linux/firmware.h> 29#include <linux/firmware.h>
29#include <drm/amdgpu_drm.h> 30#include <drm/amdgpu_drm.h>
@@ -32,7 +33,6 @@
32#include "atom.h" 33#include "atom.h"
33#include "amdgpu_ucode.h" 34#include "amdgpu_ucode.h"
34 35
35
36struct amdgpu_cgs_device { 36struct amdgpu_cgs_device {
37 struct cgs_device base; 37 struct cgs_device base;
38 struct amdgpu_device *adev; 38 struct amdgpu_device *adev;
@@ -398,6 +398,41 @@ static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
398 WARN(ret, "pci_write_config_dword error"); 398 WARN(ret, "pci_write_config_dword error");
399} 399}
400 400
401
402static int amdgpu_cgs_get_pci_resource(void *cgs_device,
403 enum cgs_resource_type resource_type,
404 uint64_t size,
405 uint64_t offset,
406 uint64_t *resource_base)
407{
408 CGS_FUNC_ADEV;
409
410 if (resource_base == NULL)
411 return -EINVAL;
412
413 switch (resource_type) {
414 case CGS_RESOURCE_TYPE_MMIO:
415 if (adev->rmmio_size == 0)
416 return -ENOENT;
417 if ((offset + size) > adev->rmmio_size)
418 return -EINVAL;
419 *resource_base = adev->rmmio_base;
420 return 0;
421 case CGS_RESOURCE_TYPE_DOORBELL:
422 if (adev->doorbell.size == 0)
423 return -ENOENT;
424 if ((offset + size) > adev->doorbell.size)
425 return -EINVAL;
426 *resource_base = adev->doorbell.base;
427 return 0;
428 case CGS_RESOURCE_TYPE_FB:
429 case CGS_RESOURCE_TYPE_IO:
430 case CGS_RESOURCE_TYPE_ROM:
431 default:
432 return -EINVAL;
433 }
434}
435
401static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device, 436static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
402 unsigned table, uint16_t *size, 437 unsigned table, uint16_t *size,
403 uint8_t *frev, uint8_t *crev) 438 uint8_t *frev, uint8_t *crev)
@@ -703,6 +738,9 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
703 case CHIP_TONGA: 738 case CHIP_TONGA:
704 strcpy(fw_name, "amdgpu/tonga_smc.bin"); 739 strcpy(fw_name, "amdgpu/tonga_smc.bin");
705 break; 740 break;
741 case CHIP_FIJI:
742 strcpy(fw_name, "amdgpu/fiji_smc.bin");
743 break;
706 default: 744 default:
707 DRM_ERROR("SMC firmware not supported\n"); 745 DRM_ERROR("SMC firmware not supported\n");
708 return -EINVAL; 746 return -EINVAL;
@@ -736,6 +774,288 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
736 return 0; 774 return 0;
737} 775}
738 776
777static int amdgpu_cgs_query_system_info(void *cgs_device,
778 struct cgs_system_info *sys_info)
779{
780 CGS_FUNC_ADEV;
781
782 if (NULL == sys_info)
783 return -ENODEV;
784
785 if (sizeof(struct cgs_system_info) != sys_info->size)
786 return -ENODEV;
787
788 switch (sys_info->info_id) {
789 case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
790 sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
791 break;
792 case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
793 sys_info->value = adev->pm.pcie_gen_mask;
794 break;
795 case CGS_SYSTEM_INFO_PCIE_MLW:
796 sys_info->value = adev->pm.pcie_mlw_mask;
797 break;
798 default:
799 return -ENODEV;
800 }
801
802 return 0;
803}
804
805static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
806 struct cgs_display_info *info)
807{
808 CGS_FUNC_ADEV;
809 struct amdgpu_crtc *amdgpu_crtc;
810 struct drm_device *ddev = adev->ddev;
811 struct drm_crtc *crtc;
812 uint32_t line_time_us, vblank_lines;
813
814 if (info == NULL)
815 return -EINVAL;
816
817 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
818 list_for_each_entry(crtc,
819 &ddev->mode_config.crtc_list, head) {
820 amdgpu_crtc = to_amdgpu_crtc(crtc);
821 if (crtc->enabled) {
822 info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
823 info->display_count++;
824 }
825 if (info->mode_info != NULL &&
826 crtc->enabled && amdgpu_crtc->enabled &&
827 amdgpu_crtc->hw_mode.clock) {
828 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
829 amdgpu_crtc->hw_mode.clock;
830 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
831 amdgpu_crtc->hw_mode.crtc_vdisplay +
832 (amdgpu_crtc->v_border * 2);
833 info->mode_info->vblank_time_us = vblank_lines * line_time_us;
834 info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
835 info->mode_info->ref_clock = adev->clock.spll.reference_freq;
836 info->mode_info++;
837 }
838 }
839 }
840
841 return 0;
842}
843
844/** \brief evaluate acpi namespace object, handle or pathname must be valid
845 * \param cgs_device
846 * \param info input/output arguments for the control method
847 * \return status
848 */
849
850#if defined(CONFIG_ACPI)
851static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
852 struct cgs_acpi_method_info *info)
853{
854 CGS_FUNC_ADEV;
855 acpi_handle handle;
856 struct acpi_object_list input;
857 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
858 union acpi_object *params = NULL;
859 union acpi_object *obj = NULL;
860 uint8_t name[5] = {'\0'};
861 struct cgs_acpi_method_argument *argument = NULL;
862 uint32_t i, count;
863 acpi_status status;
864 int result;
865 uint32_t func_no = 0xFFFFFFFF;
866
867 handle = ACPI_HANDLE(&adev->pdev->dev);
868 if (!handle)
869 return -ENODEV;
870
871 memset(&input, 0, sizeof(struct acpi_object_list));
872
873 /* validate input info */
874 if (info->size != sizeof(struct cgs_acpi_method_info))
875 return -EINVAL;
876
877 input.count = info->input_count;
878 if (info->input_count > 0) {
879 if (info->pinput_argument == NULL)
880 return -EINVAL;
881 argument = info->pinput_argument;
882 func_no = argument->value;
883 for (i = 0; i < info->input_count; i++) {
884 if (((argument->type == ACPI_TYPE_STRING) ||
885 (argument->type == ACPI_TYPE_BUFFER)) &&
886 (argument->pointer == NULL))
887 return -EINVAL;
888 argument++;
889 }
890 }
891
892 if (info->output_count > 0) {
893 if (info->poutput_argument == NULL)
894 return -EINVAL;
895 argument = info->poutput_argument;
896 for (i = 0; i < info->output_count; i++) {
897 if (((argument->type == ACPI_TYPE_STRING) ||
898 (argument->type == ACPI_TYPE_BUFFER))
899 && (argument->pointer == NULL))
900 return -EINVAL;
901 argument++;
902 }
903 }
904
905 /* The path name passed to acpi_evaluate_object should be null terminated */
906 if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
907 strncpy(name, (char *)&(info->name), sizeof(uint32_t));
908 name[4] = '\0';
909 }
910
911 /* parse input parameters */
912 if (input.count > 0) {
913 input.pointer = params =
914 kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
915 if (params == NULL)
916 return -EINVAL;
917
918 argument = info->pinput_argument;
919
920 for (i = 0; i < input.count; i++) {
921 params->type = argument->type;
922 switch (params->type) {
923 case ACPI_TYPE_INTEGER:
924 params->integer.value = argument->value;
925 break;
926 case ACPI_TYPE_STRING:
927 params->string.length = argument->method_length;
928 params->string.pointer = argument->pointer;
929 break;
930 case ACPI_TYPE_BUFFER:
931 params->buffer.length = argument->method_length;
932 params->buffer.pointer = argument->pointer;
933 break;
934 default:
935 break;
936 }
937 params++;
938 argument++;
939 }
940 }
941
942 /* parse output info */
943 count = info->output_count;
944 argument = info->poutput_argument;
945
946 /* evaluate the acpi method */
947 status = acpi_evaluate_object(handle, name, &input, &output);
948
949 if (ACPI_FAILURE(status)) {
950 result = -EIO;
951 goto error;
952 }
953
954 /* return the output info */
955 obj = output.pointer;
956
957 if (count > 1) {
958 if ((obj->type != ACPI_TYPE_PACKAGE) ||
959 (obj->package.count != count)) {
960 result = -EIO;
961 goto error;
962 }
963 params = obj->package.elements;
964 } else
965 params = obj;
966
967 if (params == NULL) {
968 result = -EIO;
969 goto error;
970 }
971
972 for (i = 0; i < count; i++) {
973 if (argument->type != params->type) {
974 result = -EIO;
975 goto error;
976 }
977 switch (params->type) {
978 case ACPI_TYPE_INTEGER:
979 argument->value = params->integer.value;
980 break;
981 case ACPI_TYPE_STRING:
982 if ((params->string.length != argument->data_length) ||
983 (params->string.pointer == NULL)) {
984 result = -EIO;
985 goto error;
986 }
987 strncpy(argument->pointer,
988 params->string.pointer,
989 params->string.length);
990 break;
991 case ACPI_TYPE_BUFFER:
992 if (params->buffer.pointer == NULL) {
993 result = -EIO;
994 goto error;
995 }
996 memcpy(argument->pointer,
997 params->buffer.pointer,
998 argument->data_length);
999 break;
1000 default:
1001 break;
1002 }
1003 argument++;
1004 params++;
1005 }
1006
1007error:
1008 if (obj != NULL)
1009 kfree(obj);
1010 kfree((void *)input.pointer);
1011 return result;
1012}
1013#else
1014static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
1015 struct cgs_acpi_method_info *info)
1016{
1017 return -EIO;
1018}
1019#endif
1020
1021int amdgpu_cgs_call_acpi_method(void *cgs_device,
1022 uint32_t acpi_method,
1023 uint32_t acpi_function,
1024 void *pinput, void *poutput,
1025 uint32_t output_count,
1026 uint32_t input_size,
1027 uint32_t output_size)
1028{
1029 struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
1030 struct cgs_acpi_method_argument acpi_output = {0};
1031 struct cgs_acpi_method_info info = {0};
1032
1033 acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
1034 acpi_input[0].method_length = sizeof(uint32_t);
1035 acpi_input[0].data_length = sizeof(uint32_t);
1036 acpi_input[0].value = acpi_function;
1037
1038 acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
1039 acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
1040 acpi_input[1].data_length = input_size;
1041 acpi_input[1].pointer = pinput;
1042
1043 acpi_output.type = CGS_ACPI_TYPE_BUFFER;
1044 acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
1045 acpi_output.data_length = output_size;
1046 acpi_output.pointer = poutput;
1047
1048 info.size = sizeof(struct cgs_acpi_method_info);
1049 info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
1050 info.input_count = 2;
1051 info.name = acpi_method;
1052 info.pinput_argument = acpi_input;
1053 info.output_count = output_count;
1054 info.poutput_argument = &acpi_output;
1055
1056 return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
1057}
1058
739static const struct cgs_ops amdgpu_cgs_ops = { 1059static const struct cgs_ops amdgpu_cgs_ops = {
740 amdgpu_cgs_gpu_mem_info, 1060 amdgpu_cgs_gpu_mem_info,
741 amdgpu_cgs_gmap_kmem, 1061 amdgpu_cgs_gmap_kmem,
@@ -756,6 +1076,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
756 amdgpu_cgs_write_pci_config_byte, 1076 amdgpu_cgs_write_pci_config_byte,
757 amdgpu_cgs_write_pci_config_word, 1077 amdgpu_cgs_write_pci_config_word,
758 amdgpu_cgs_write_pci_config_dword, 1078 amdgpu_cgs_write_pci_config_dword,
1079 amdgpu_cgs_get_pci_resource,
759 amdgpu_cgs_atom_get_data_table, 1080 amdgpu_cgs_atom_get_data_table,
760 amdgpu_cgs_atom_get_cmd_table_revs, 1081 amdgpu_cgs_atom_get_cmd_table_revs,
761 amdgpu_cgs_atom_exec_cmd_table, 1082 amdgpu_cgs_atom_exec_cmd_table,
@@ -768,7 +1089,10 @@ static const struct cgs_ops amdgpu_cgs_ops = {
768 amdgpu_cgs_set_camera_voltages, 1089 amdgpu_cgs_set_camera_voltages,
769 amdgpu_cgs_get_firmware_info, 1090 amdgpu_cgs_get_firmware_info,
770 amdgpu_cgs_set_powergating_state, 1091 amdgpu_cgs_set_powergating_state,
771 amdgpu_cgs_set_clockgating_state 1092 amdgpu_cgs_set_clockgating_state,
1093 amdgpu_cgs_get_active_displays_info,
1094 amdgpu_cgs_call_acpi_method,
1095 amdgpu_cgs_query_system_info,
772}; 1096};
773 1097
774static const struct cgs_os_ops amdgpu_cgs_os_ops = { 1098static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4f352ec9dec4..6f89f8e034d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
127 return 0; 127 return 0;
128} 128}
129 129
130static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
131 struct drm_amdgpu_cs_chunk_fence *fence_data)
132{
133 struct drm_gem_object *gobj;
134 uint32_t handle;
135
136 handle = fence_data->handle;
137 gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
138 fence_data->handle);
139 if (gobj == NULL)
140 return -EINVAL;
141
142 p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
143 p->uf.offset = fence_data->offset;
144
145 if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
146 drm_gem_object_unreference_unlocked(gobj);
147 return -EINVAL;
148 }
149
150 p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
151 p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
152 p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
153 p->uf_entry.priority = 0;
154 p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
155 p->uf_entry.tv.shared = true;
156
157 drm_gem_object_unreference_unlocked(gobj);
158 return 0;
159}
160
130int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 161int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
131{ 162{
132 union drm_amdgpu_cs *cs = data; 163 union drm_amdgpu_cs *cs = data;
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
207 238
208 case AMDGPU_CHUNK_ID_FENCE: 239 case AMDGPU_CHUNK_ID_FENCE:
209 size = sizeof(struct drm_amdgpu_cs_chunk_fence); 240 size = sizeof(struct drm_amdgpu_cs_chunk_fence);
210 if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { 241 if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
211 uint32_t handle;
212 struct drm_gem_object *gobj;
213 struct drm_amdgpu_cs_chunk_fence *fence_data;
214
215 fence_data = (void *)p->chunks[i].kdata;
216 handle = fence_data->handle;
217 gobj = drm_gem_object_lookup(p->adev->ddev,
218 p->filp, handle);
219 if (gobj == NULL) {
220 ret = -EINVAL;
221 goto free_partial_kdata;
222 }
223
224 p->uf.bo = gem_to_amdgpu_bo(gobj);
225 amdgpu_bo_ref(p->uf.bo);
226 drm_gem_object_unreference_unlocked(gobj);
227 p->uf.offset = fence_data->offset;
228 } else {
229 ret = -EINVAL; 242 ret = -EINVAL;
230 goto free_partial_kdata; 243 goto free_partial_kdata;
231 } 244 }
245
246 ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
247 if (ret)
248 goto free_partial_kdata;
249
232 break; 250 break;
233 251
234 case AMDGPU_CHUNK_ID_DEPENDENCIES: 252 case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -388,26 +406,32 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
388 amdgpu_cs_buckets_get_list(&buckets, &p->validated); 406 amdgpu_cs_buckets_get_list(&buckets, &p->validated);
389 } 407 }
390 408
391 p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, 409 INIT_LIST_HEAD(&duplicates);
392 &p->validated); 410 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
411
412 if (p->uf.bo)
413 list_add(&p->uf_entry.tv.head, &p->validated);
393 414
394 if (need_mmap_lock) 415 if (need_mmap_lock)
395 down_read(&current->mm->mmap_sem); 416 down_read(&current->mm->mmap_sem);
396 417
397 INIT_LIST_HEAD(&duplicates);
398 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); 418 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
399 if (unlikely(r != 0)) 419 if (unlikely(r != 0))
400 goto error_reserve; 420 goto error_reserve;
401 421
402 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); 422 amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
423
424 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
403 if (r) 425 if (r)
404 goto error_validate; 426 goto error_validate;
405 427
406 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); 428 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
407 429
408error_validate: 430error_validate:
409 if (r) 431 if (r) {
432 amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
410 ttm_eu_backoff_reservation(&p->ticket, &p->validated); 433 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
434 }
411 435
412error_reserve: 436error_reserve:
413 if (need_mmap_lock) 437 if (need_mmap_lock)
@@ -451,8 +475,11 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
451 **/ 475 **/
452static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) 476static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
453{ 477{
478 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
454 unsigned i; 479 unsigned i;
455 480
481 amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
482
456 if (!error) { 483 if (!error) {
457 /* Sort the buffer list from the smallest to largest buffer, 484 /* Sort the buffer list from the smallest to largest buffer,
458 * which affects the order of buffers in the LRU list. 485 * which affects the order of buffers in the LRU list.
@@ -480,7 +507,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
480 if (parser->bo_list) 507 if (parser->bo_list)
481 amdgpu_bo_list_put(parser->bo_list); 508 amdgpu_bo_list_put(parser->bo_list);
482 509
483 drm_free_large(parser->vm_bos);
484 for (i = 0; i < parser->nchunks; i++) 510 for (i = 0; i < parser->nchunks; i++)
485 drm_free_large(parser->chunks[i].kdata); 511 drm_free_large(parser->chunks[i].kdata);
486 kfree(parser->chunks); 512 kfree(parser->chunks);
@@ -488,8 +514,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
488 for (i = 0; i < parser->num_ibs; i++) 514 for (i = 0; i < parser->num_ibs; i++)
489 amdgpu_ib_free(parser->adev, &parser->ibs[i]); 515 amdgpu_ib_free(parser->adev, &parser->ibs[i]);
490 kfree(parser->ibs); 516 kfree(parser->ibs);
491 if (parser->uf.bo) 517 amdgpu_bo_unref(&parser->uf.bo);
492 amdgpu_bo_unref(&parser->uf.bo); 518 amdgpu_bo_unref(&parser->uf_entry.robj);
493} 519}
494 520
495static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, 521static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index fec65f01c031..17d1fb12128a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,7 +25,7 @@
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "amdgpu.h" 26#include "amdgpu.h"
27 27
28int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, 28int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
29 struct amdgpu_ctx *ctx) 29 struct amdgpu_ctx *ctx)
30{ 30{
31 unsigned i, j; 31 unsigned i, j;
@@ -35,17 +35,25 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
35 ctx->adev = adev; 35 ctx->adev = adev;
36 kref_init(&ctx->refcount); 36 kref_init(&ctx->refcount);
37 spin_lock_init(&ctx->ring_lock); 37 spin_lock_init(&ctx->ring_lock);
38 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 38 ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
39 ctx->rings[i].sequence = 1; 39 AMDGPU_MAX_RINGS, GFP_KERNEL);
40 if (!ctx->fences)
41 return -ENOMEM;
40 42
43 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
44 ctx->rings[i].sequence = 1;
45 ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
46 amdgpu_sched_jobs * i;
47 }
41 if (amdgpu_enable_scheduler) { 48 if (amdgpu_enable_scheduler) {
42 /* create context entity for each ring */ 49 /* create context entity for each ring */
43 for (i = 0; i < adev->num_rings; i++) { 50 for (i = 0; i < adev->num_rings; i++) {
44 struct amd_sched_rq *rq; 51 struct amd_sched_rq *rq;
45 if (kernel) 52 if (pri >= AMD_SCHED_MAX_PRIORITY) {
46 rq = &adev->rings[i]->sched.kernel_rq; 53 kfree(ctx->fences);
47 else 54 return -EINVAL;
48 rq = &adev->rings[i]->sched.sched_rq; 55 }
56 rq = &adev->rings[i]->sched.sched_rq[pri];
49 r = amd_sched_entity_init(&adev->rings[i]->sched, 57 r = amd_sched_entity_init(&adev->rings[i]->sched,
50 &ctx->rings[i].entity, 58 &ctx->rings[i].entity,
51 rq, amdgpu_sched_jobs); 59 rq, amdgpu_sched_jobs);
@@ -57,7 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
57 for (j = 0; j < i; j++) 65 for (j = 0; j < i; j++)
58 amd_sched_entity_fini(&adev->rings[j]->sched, 66 amd_sched_entity_fini(&adev->rings[j]->sched,
59 &ctx->rings[j].entity); 67 &ctx->rings[j].entity);
60 kfree(ctx); 68 kfree(ctx->fences);
61 return r; 69 return r;
62 } 70 }
63 } 71 }
@@ -73,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
73 return; 81 return;
74 82
75 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 83 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
76 for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j) 84 for (j = 0; j < amdgpu_sched_jobs; ++j)
77 fence_put(ctx->rings[i].fences[j]); 85 fence_put(ctx->rings[i].fences[j]);
86 kfree(ctx->fences);
78 87
79 if (amdgpu_enable_scheduler) { 88 if (amdgpu_enable_scheduler) {
80 for (i = 0; i < adev->num_rings; i++) 89 for (i = 0; i < adev->num_rings; i++)
@@ -103,9 +112,13 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
103 return r; 112 return r;
104 } 113 }
105 *id = (uint32_t)r; 114 *id = (uint32_t)r;
106 r = amdgpu_ctx_init(adev, false, ctx); 115 r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
116 if (r) {
117 idr_remove(&mgr->ctx_handles, *id);
118 *id = 0;
119 kfree(ctx);
120 }
107 mutex_unlock(&mgr->lock); 121 mutex_unlock(&mgr->lock);
108
109 return r; 122 return r;
110} 123}
111 124
@@ -239,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
239 unsigned idx = 0; 252 unsigned idx = 0;
240 struct fence *other = NULL; 253 struct fence *other = NULL;
241 254
242 idx = seq % AMDGPU_CTX_MAX_CS_PENDING; 255 idx = seq & (amdgpu_sched_jobs - 1);
243 other = cring->fences[idx]; 256 other = cring->fences[idx];
244 if (other) { 257 if (other) {
245 signed long r; 258 signed long r;
@@ -274,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
274 } 287 }
275 288
276 289
277 if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) { 290 if (seq + amdgpu_sched_jobs < cring->sequence) {
278 spin_unlock(&ctx->ring_lock); 291 spin_unlock(&ctx->ring_lock);
279 return NULL; 292 return NULL;
280 } 293 }
281 294
282 fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]); 295 fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
283 spin_unlock(&ctx->ring_lock); 296 spin_unlock(&ctx->ring_lock);
284 297
285 return fence; 298 return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d5b421330145..65531463f88e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -38,6 +38,7 @@
38#include "amdgpu_i2c.h" 38#include "amdgpu_i2c.h"
39#include "atom.h" 39#include "atom.h"
40#include "amdgpu_atombios.h" 40#include "amdgpu_atombios.h"
41#include "amd_pcie.h"
41#ifdef CONFIG_DRM_AMDGPU_CIK 42#ifdef CONFIG_DRM_AMDGPU_CIK
42#include "cik.h" 43#include "cik.h"
43#endif 44#endif
@@ -949,6 +950,15 @@ static bool amdgpu_check_pot_argument(int arg)
949 */ 950 */
950static void amdgpu_check_arguments(struct amdgpu_device *adev) 951static void amdgpu_check_arguments(struct amdgpu_device *adev)
951{ 952{
953 if (amdgpu_sched_jobs < 4) {
954 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
955 amdgpu_sched_jobs);
956 amdgpu_sched_jobs = 4;
957 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
958 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
959 amdgpu_sched_jobs);
960 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
961 }
952 /* vramlimit must be a power of two */ 962 /* vramlimit must be a power of two */
953 if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) { 963 if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
954 dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n", 964 dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
@@ -1214,12 +1224,14 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1214 } else { 1224 } else {
1215 if (adev->ip_blocks[i].funcs->early_init) { 1225 if (adev->ip_blocks[i].funcs->early_init) {
1216 r = adev->ip_blocks[i].funcs->early_init((void *)adev); 1226 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
1217 if (r == -ENOENT) 1227 if (r == -ENOENT) {
1218 adev->ip_block_status[i].valid = false; 1228 adev->ip_block_status[i].valid = false;
1219 else if (r) 1229 } else if (r) {
1230 DRM_ERROR("early_init %d failed %d\n", i, r);
1220 return r; 1231 return r;
1221 else 1232 } else {
1222 adev->ip_block_status[i].valid = true; 1233 adev->ip_block_status[i].valid = true;
1234 }
1223 } else { 1235 } else {
1224 adev->ip_block_status[i].valid = true; 1236 adev->ip_block_status[i].valid = true;
1225 } 1237 }
@@ -1237,20 +1249,28 @@ static int amdgpu_init(struct amdgpu_device *adev)
1237 if (!adev->ip_block_status[i].valid) 1249 if (!adev->ip_block_status[i].valid)
1238 continue; 1250 continue;
1239 r = adev->ip_blocks[i].funcs->sw_init((void *)adev); 1251 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
1240 if (r) 1252 if (r) {
1253 DRM_ERROR("sw_init %d failed %d\n", i, r);
1241 return r; 1254 return r;
1255 }
1242 adev->ip_block_status[i].sw = true; 1256 adev->ip_block_status[i].sw = true;
1243 /* need to do gmc hw init early so we can allocate gpu mem */ 1257 /* need to do gmc hw init early so we can allocate gpu mem */
1244 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1258 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1245 r = amdgpu_vram_scratch_init(adev); 1259 r = amdgpu_vram_scratch_init(adev);
1246 if (r) 1260 if (r) {
1261 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1247 return r; 1262 return r;
1263 }
1248 r = adev->ip_blocks[i].funcs->hw_init((void *)adev); 1264 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1249 if (r) 1265 if (r) {
1266 DRM_ERROR("hw_init %d failed %d\n", i, r);
1250 return r; 1267 return r;
1268 }
1251 r = amdgpu_wb_init(adev); 1269 r = amdgpu_wb_init(adev);
1252 if (r) 1270 if (r) {
1271 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
1253 return r; 1272 return r;
1273 }
1254 adev->ip_block_status[i].hw = true; 1274 adev->ip_block_status[i].hw = true;
1255 } 1275 }
1256 } 1276 }
@@ -1262,8 +1282,10 @@ static int amdgpu_init(struct amdgpu_device *adev)
1262 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) 1282 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
1263 continue; 1283 continue;
1264 r = adev->ip_blocks[i].funcs->hw_init((void *)adev); 1284 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1265 if (r) 1285 if (r) {
1286 DRM_ERROR("hw_init %d failed %d\n", i, r);
1266 return r; 1287 return r;
1288 }
1267 adev->ip_block_status[i].hw = true; 1289 adev->ip_block_status[i].hw = true;
1268 } 1290 }
1269 1291
@@ -1280,12 +1302,16 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
1280 /* enable clockgating to save power */ 1302 /* enable clockgating to save power */
1281 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1303 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1282 AMD_CG_STATE_GATE); 1304 AMD_CG_STATE_GATE);
1283 if (r) 1305 if (r) {
1306 DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r);
1284 return r; 1307 return r;
1308 }
1285 if (adev->ip_blocks[i].funcs->late_init) { 1309 if (adev->ip_blocks[i].funcs->late_init) {
1286 r = adev->ip_blocks[i].funcs->late_init((void *)adev); 1310 r = adev->ip_blocks[i].funcs->late_init((void *)adev);
1287 if (r) 1311 if (r) {
1312 DRM_ERROR("late_init %d failed %d\n", i, r);
1288 return r; 1313 return r;
1314 }
1289 } 1315 }
1290 } 1316 }
1291 1317
@@ -1306,10 +1332,15 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1306 /* ungate blocks before hw fini so that we can shutdown the blocks safely */ 1332 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1307 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1333 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1308 AMD_CG_STATE_UNGATE); 1334 AMD_CG_STATE_UNGATE);
1309 if (r) 1335 if (r) {
1336 DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
1310 return r; 1337 return r;
1338 }
1311 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); 1339 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
1312 /* XXX handle errors */ 1340 /* XXX handle errors */
1341 if (r) {
1342 DRM_DEBUG("hw_fini %d failed %d\n", i, r);
1343 }
1313 adev->ip_block_status[i].hw = false; 1344 adev->ip_block_status[i].hw = false;
1314 } 1345 }
1315 1346
@@ -1318,6 +1349,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1318 continue; 1349 continue;
1319 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); 1350 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
1320 /* XXX handle errors */ 1351 /* XXX handle errors */
1352 if (r) {
1353 DRM_DEBUG("sw_fini %d failed %d\n", i, r);
1354 }
1321 adev->ip_block_status[i].sw = false; 1355 adev->ip_block_status[i].sw = false;
1322 adev->ip_block_status[i].valid = false; 1356 adev->ip_block_status[i].valid = false;
1323 } 1357 }
@@ -1335,9 +1369,15 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
1335 /* ungate blocks so that suspend can properly shut them down */ 1369 /* ungate blocks so that suspend can properly shut them down */
1336 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1370 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1337 AMD_CG_STATE_UNGATE); 1371 AMD_CG_STATE_UNGATE);
1372 if (r) {
1373 DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
1374 }
1338 /* XXX handle errors */ 1375 /* XXX handle errors */
1339 r = adev->ip_blocks[i].funcs->suspend(adev); 1376 r = adev->ip_blocks[i].funcs->suspend(adev);
1340 /* XXX handle errors */ 1377 /* XXX handle errors */
1378 if (r) {
1379 DRM_ERROR("suspend %d failed %d\n", i, r);
1380 }
1341 } 1381 }
1342 1382
1343 return 0; 1383 return 0;
@@ -1351,8 +1391,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1351 if (!adev->ip_block_status[i].valid) 1391 if (!adev->ip_block_status[i].valid)
1352 continue; 1392 continue;
1353 r = adev->ip_blocks[i].funcs->resume(adev); 1393 r = adev->ip_blocks[i].funcs->resume(adev);
1354 if (r) 1394 if (r) {
1395 DRM_ERROR("resume %d failed %d\n", i, r);
1355 return r; 1396 return r;
1397 }
1356 } 1398 }
1357 1399
1358 return 0; 1400 return 0;
@@ -1484,8 +1526,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1484 return -EINVAL; 1526 return -EINVAL;
1485 } 1527 }
1486 r = amdgpu_atombios_init(adev); 1528 r = amdgpu_atombios_init(adev);
1487 if (r) 1529 if (r) {
1530 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1488 return r; 1531 return r;
1532 }
1489 1533
1490 /* Post card if necessary */ 1534 /* Post card if necessary */
1491 if (!amdgpu_card_posted(adev)) { 1535 if (!amdgpu_card_posted(adev)) {
@@ -1499,21 +1543,26 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1499 1543
1500 /* Initialize clocks */ 1544 /* Initialize clocks */
1501 r = amdgpu_atombios_get_clock_info(adev); 1545 r = amdgpu_atombios_get_clock_info(adev);
1502 if (r) 1546 if (r) {
1547 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1503 return r; 1548 return r;
1549 }
1504 /* init i2c buses */ 1550 /* init i2c buses */
1505 amdgpu_atombios_i2c_init(adev); 1551 amdgpu_atombios_i2c_init(adev);
1506 1552
1507 /* Fence driver */ 1553 /* Fence driver */
1508 r = amdgpu_fence_driver_init(adev); 1554 r = amdgpu_fence_driver_init(adev);
1509 if (r) 1555 if (r) {
1556 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
1510 return r; 1557 return r;
1558 }
1511 1559
1512 /* init the mode config */ 1560 /* init the mode config */
1513 drm_mode_config_init(adev->ddev); 1561 drm_mode_config_init(adev->ddev);
1514 1562
1515 r = amdgpu_init(adev); 1563 r = amdgpu_init(adev);
1516 if (r) { 1564 if (r) {
1565 dev_err(adev->dev, "amdgpu_init failed\n");
1517 amdgpu_fini(adev); 1566 amdgpu_fini(adev);
1518 return r; 1567 return r;
1519 } 1568 }
@@ -1528,7 +1577,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1528 return r; 1577 return r;
1529 } 1578 }
1530 1579
1531 r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx); 1580 r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
1532 if (r) { 1581 if (r) {
1533 dev_err(adev->dev, "failed to create kernel context (%d).\n", r); 1582 dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
1534 return r; 1583 return r;
@@ -1570,8 +1619,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1570 * explicit gating rather than handling it automatically. 1619 * explicit gating rather than handling it automatically.
1571 */ 1620 */
1572 r = amdgpu_late_init(adev); 1621 r = amdgpu_late_init(adev);
1573 if (r) 1622 if (r) {
1623 dev_err(adev->dev, "amdgpu_late_init failed\n");
1574 return r; 1624 return r;
1625 }
1575 1626
1576 return 0; 1627 return 0;
1577} 1628}
@@ -1788,6 +1839,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1788 } 1839 }
1789 1840
1790 drm_kms_helper_poll_enable(dev); 1841 drm_kms_helper_poll_enable(dev);
1842 drm_helper_hpd_irq_event(dev);
1791 1843
1792 if (fbcon) { 1844 if (fbcon) {
1793 amdgpu_fbdev_set_suspend(adev, 0); 1845 amdgpu_fbdev_set_suspend(adev, 0);
@@ -1881,6 +1933,83 @@ retry:
1881 return r; 1933 return r;
1882} 1934}
1883 1935
1936void amdgpu_get_pcie_info(struct amdgpu_device *adev)
1937{
1938 u32 mask;
1939 int ret;
1940
1941 if (pci_is_root_bus(adev->pdev->bus))
1942 return;
1943
1944 if (amdgpu_pcie_gen2 == 0)
1945 return;
1946
1947 if (adev->flags & AMD_IS_APU)
1948 return;
1949
1950 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
1951 if (!ret) {
1952 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
1953 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1954 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
1955
1956 if (mask & DRM_PCIE_SPEED_25)
1957 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
1958 if (mask & DRM_PCIE_SPEED_50)
1959 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
1960 if (mask & DRM_PCIE_SPEED_80)
1961 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
1962 }
1963 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
1964 if (!ret) {
1965 switch (mask) {
1966 case 32:
1967 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
1968 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1969 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1970 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1971 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1972 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1973 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1974 break;
1975 case 16:
1976 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1977 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1978 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1979 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1980 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1981 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1982 break;
1983 case 12:
1984 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1985 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1986 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1987 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1988 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1989 break;
1990 case 8:
1991 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1992 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1993 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1994 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1995 break;
1996 case 4:
1997 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1998 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1999 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2000 break;
2001 case 2:
2002 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2003 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2004 break;
2005 case 1:
2006 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2007 break;
2008 default:
2009 break;
2010 }
2011 }
2012}
1884 2013
1885/* 2014/*
1886 * Debugfs 2015 * Debugfs
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0508c5cd103a..b5dbbb573491 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,9 +79,10 @@ int amdgpu_vm_fault_stop = 0;
79int amdgpu_vm_debug = 0; 79int amdgpu_vm_debug = 0;
80int amdgpu_exp_hw_support = 0; 80int amdgpu_exp_hw_support = 0;
81int amdgpu_enable_scheduler = 1; 81int amdgpu_enable_scheduler = 1;
82int amdgpu_sched_jobs = 16; 82int amdgpu_sched_jobs = 32;
83int amdgpu_sched_hw_submission = 2; 83int amdgpu_sched_hw_submission = 2;
84int amdgpu_enable_semaphores = 0; 84int amdgpu_enable_semaphores = 0;
85int amdgpu_powerplay = -1;
85 86
86MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 87MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
87module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 88module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -155,7 +156,7 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
155MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)"); 156MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
156module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); 157module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
157 158
158MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)"); 159MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
159module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); 160module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
160 161
161MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); 162MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
@@ -164,6 +165,11 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
164MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))"); 165MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
165module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); 166module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
166 167
168#ifdef CONFIG_DRM_AMD_POWERPLAY
169MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
170module_param_named(powerplay, amdgpu_powerplay, int, 0444);
171#endif
172
167static struct pci_device_id pciidlist[] = { 173static struct pci_device_id pciidlist[] = {
168#ifdef CONFIG_DRM_AMDGPU_CIK 174#ifdef CONFIG_DRM_AMDGPU_CIK
169 /* Kaveri */ 175 /* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 6fcbbcc2e99e..cfb6caad2a73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -263,7 +263,7 @@ out_unref:
263 263
264 } 264 }
265 if (fb && ret) { 265 if (fb && ret) {
266 drm_gem_object_unreference(gobj); 266 drm_gem_object_unreference_unlocked(gobj);
267 drm_framebuffer_unregister_private(fb); 267 drm_framebuffer_unregister_private(fb);
268 drm_framebuffer_cleanup(fb); 268 drm_framebuffer_cleanup(fb);
269 kfree(fb); 269 kfree(fb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index f6ea4b43a60c..7380f782cd14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -448,7 +448,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
448 struct amdgpu_bo_va *bo_va, uint32_t operation) 448 struct amdgpu_bo_va *bo_va, uint32_t operation)
449{ 449{
450 struct ttm_validate_buffer tv, *entry; 450 struct ttm_validate_buffer tv, *entry;
451 struct amdgpu_bo_list_entry *vm_bos; 451 struct amdgpu_bo_list_entry vm_pd;
452 struct ww_acquire_ctx ticket; 452 struct ww_acquire_ctx ticket;
453 struct list_head list, duplicates; 453 struct list_head list, duplicates;
454 unsigned domain; 454 unsigned domain;
@@ -461,15 +461,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
461 tv.shared = true; 461 tv.shared = true;
462 list_add(&tv.head, &list); 462 list_add(&tv.head, &list);
463 463
464 vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list); 464 amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
465 if (!vm_bos)
466 return;
467 465
468 /* Provide duplicates to avoid -EALREADY */ 466 /* Provide duplicates to avoid -EALREADY */
469 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); 467 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
470 if (r) 468 if (r)
471 goto error_free; 469 goto error_print;
472 470
471 amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
473 list_for_each_entry(entry, &list, head) { 472 list_for_each_entry(entry, &list, head) {
474 domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); 473 domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
475 /* if anything is swapped out don't swap it in here, 474 /* if anything is swapped out don't swap it in here,
@@ -477,6 +476,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
477 if (domain == AMDGPU_GEM_DOMAIN_CPU) 476 if (domain == AMDGPU_GEM_DOMAIN_CPU)
478 goto error_unreserve; 477 goto error_unreserve;
479 } 478 }
479 list_for_each_entry(entry, &duplicates, head) {
480 domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
481 /* if anything is swapped out don't swap it in here,
482 just abort and wait for the next CS */
483 if (domain == AMDGPU_GEM_DOMAIN_CPU)
484 goto error_unreserve;
485 }
486
480 r = amdgpu_vm_update_page_directory(adev, bo_va->vm); 487 r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
481 if (r) 488 if (r)
482 goto error_unreserve; 489 goto error_unreserve;
@@ -491,9 +498,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
491error_unreserve: 498error_unreserve:
492 ttm_eu_backoff_reservation(&ticket, &list); 499 ttm_eu_backoff_reservation(&ticket, &list);
493 500
494error_free: 501error_print:
495 drm_free_large(vm_bos);
496
497 if (r && r != -ERESTARTSYS) 502 if (r && r != -ERESTARTSYS)
498 DRM_ERROR("Couldn't update BO_VA (%d)\n", r); 503 DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
499} 504}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 7c42ff670080..f594cfaa97e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,7 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/irq.h>
28#include <drm/drmP.h> 29#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
30#include <drm/amdgpu_drm.h> 31#include <drm/amdgpu_drm.h>
@@ -312,6 +313,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
312 } 313 }
313 314
314 adev->irq.sources[src_id] = source; 315 adev->irq.sources[src_id] = source;
316
315 return 0; 317 return 0;
316} 318}
317 319
@@ -335,15 +337,19 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
335 return; 337 return;
336 } 338 }
337 339
338 src = adev->irq.sources[src_id]; 340 if (adev->irq.virq[src_id]) {
339 if (!src) { 341 generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
340 DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); 342 } else {
341 return; 343 src = adev->irq.sources[src_id];
342 } 344 if (!src) {
345 DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
346 return;
347 }
343 348
344 r = src->funcs->process(adev, src, entry); 349 r = src->funcs->process(adev, src, entry);
345 if (r) 350 if (r)
346 DRM_ERROR("error processing interrupt (%d)\n", r); 351 DRM_ERROR("error processing interrupt (%d)\n", r);
352 }
347} 353}
348 354
349/** 355/**
@@ -461,3 +467,90 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
461 467
462 return !!atomic_read(&src->enabled_types[type]); 468 return !!atomic_read(&src->enabled_types[type]);
463} 469}
470
471/* gen irq */
472static void amdgpu_irq_mask(struct irq_data *irqd)
473{
474 /* XXX */
475}
476
477static void amdgpu_irq_unmask(struct irq_data *irqd)
478{
479 /* XXX */
480}
481
482static struct irq_chip amdgpu_irq_chip = {
483 .name = "amdgpu-ih",
484 .irq_mask = amdgpu_irq_mask,
485 .irq_unmask = amdgpu_irq_unmask,
486};
487
488static int amdgpu_irqdomain_map(struct irq_domain *d,
489 unsigned int irq, irq_hw_number_t hwirq)
490{
491 if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
492 return -EPERM;
493
494 irq_set_chip_and_handler(irq,
495 &amdgpu_irq_chip, handle_simple_irq);
496 return 0;
497}
498
499static struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
500 .map = amdgpu_irqdomain_map,
501};
502
503/**
504 * amdgpu_irq_add_domain - create a linear irq domain
505 *
506 * @adev: amdgpu device pointer
507 *
508 * Create an irq domain for GPU interrupt sources
509 * that may be driven by another driver (e.g., ACP).
510 */
511int amdgpu_irq_add_domain(struct amdgpu_device *adev)
512{
513 adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
514 &amdgpu_hw_irqdomain_ops, adev);
515 if (!adev->irq.domain) {
516 DRM_ERROR("GPU irq add domain failed\n");
517 return -ENODEV;
518 }
519
520 return 0;
521}
522
523/**
524 * amdgpu_irq_remove_domain - remove the irq domain
525 *
526 * @adev: amdgpu device pointer
527 *
528 * Remove the irq domain for GPU interrupt sources
529 * that may be driven by another driver (e.g., ACP).
530 */
531void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
532{
533 if (adev->irq.domain) {
534 irq_domain_remove(adev->irq.domain);
535 adev->irq.domain = NULL;
536 }
537}
538
539/**
540 * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
541 * Linux irq
542 *
543 * @adev: amdgpu device pointer
544 * @src_id: IH source id
545 *
546 * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
547 * Use this for components that generate a GPU interrupt, but are driven
548 * by a different driver (e.g., ACP).
549 * Returns the Linux irq.
550 */
551unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
552{
553 adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
554
555 return adev->irq.virq[src_id];
556}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 17b01aef4278..e124b59f39c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -24,6 +24,7 @@
24#ifndef __AMDGPU_IRQ_H__ 24#ifndef __AMDGPU_IRQ_H__
25#define __AMDGPU_IRQ_H__ 25#define __AMDGPU_IRQ_H__
26 26
27#include <linux/irqdomain.h>
27#include "amdgpu_ih.h" 28#include "amdgpu_ih.h"
28 29
29#define AMDGPU_MAX_IRQ_SRC_ID 0x100 30#define AMDGPU_MAX_IRQ_SRC_ID 0x100
@@ -65,6 +66,10 @@ struct amdgpu_irq {
65 /* interrupt ring */ 66 /* interrupt ring */
66 struct amdgpu_ih_ring ih; 67 struct amdgpu_ih_ring ih;
67 const struct amdgpu_ih_funcs *ih_funcs; 68 const struct amdgpu_ih_funcs *ih_funcs;
69
70 /* gen irq stuff */
71 struct irq_domain *domain; /* GPU irq controller domain */
72 unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
68}; 73};
69 74
70void amdgpu_irq_preinstall(struct drm_device *dev); 75void amdgpu_irq_preinstall(struct drm_device *dev);
@@ -90,4 +95,8 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
90bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, 95bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
91 unsigned type); 96 unsigned type);
92 97
98int amdgpu_irq_add_domain(struct amdgpu_device *adev);
99void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
100unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
101
93#endif 102#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index a53d756672fe..fdc1be8550da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -35,6 +35,7 @@
35#include <drm/drm_dp_helper.h> 35#include <drm/drm_dp_helper.h>
36#include <drm/drm_fixed.h> 36#include <drm/drm_fixed.h>
37#include <drm/drm_crtc_helper.h> 37#include <drm/drm_crtc_helper.h>
38#include <drm/drm_fb_helper.h>
38#include <drm/drm_plane_helper.h> 39#include <drm/drm_plane_helper.h>
39#include <linux/i2c.h> 40#include <linux/i2c.h>
40#include <linux/i2c-algo-bit.h> 41#include <linux/i2c-algo-bit.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index ea756e77b023..5107fb291bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -96,6 +96,7 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
96 */ 96 */
97static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) 97static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
98{ 98{
99 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
99 return bo->tbo.offset; 100 return bo->tbo.offset;
100} 101}
101 102
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 22a8c7d3a3ab..7d8d84eaea4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -30,10 +30,16 @@
30#include <linux/hwmon.h> 30#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h> 31#include <linux/hwmon-sysfs.h>
32 32
33#include "amd_powerplay.h"
34
33static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); 35static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
34 36
35void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 37void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
36{ 38{
39 if (adev->pp_enabled)
40 /* TODO */
41 return;
42
37 if (adev->pm.dpm_enabled) { 43 if (adev->pm.dpm_enabled) {
38 mutex_lock(&adev->pm.mutex); 44 mutex_lock(&adev->pm.mutex);
39 if (power_supply_is_system_supplied() > 0) 45 if (power_supply_is_system_supplied() > 0)
@@ -52,7 +58,12 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
52{ 58{
53 struct drm_device *ddev = dev_get_drvdata(dev); 59 struct drm_device *ddev = dev_get_drvdata(dev);
54 struct amdgpu_device *adev = ddev->dev_private; 60 struct amdgpu_device *adev = ddev->dev_private;
55 enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state; 61 enum amd_pm_state_type pm;
62
63 if (adev->pp_enabled) {
64 pm = amdgpu_dpm_get_current_power_state(adev);
65 } else
66 pm = adev->pm.dpm.user_state;
56 67
57 return snprintf(buf, PAGE_SIZE, "%s\n", 68 return snprintf(buf, PAGE_SIZE, "%s\n",
58 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 69 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -66,40 +77,57 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
66{ 77{
67 struct drm_device *ddev = dev_get_drvdata(dev); 78 struct drm_device *ddev = dev_get_drvdata(dev);
68 struct amdgpu_device *adev = ddev->dev_private; 79 struct amdgpu_device *adev = ddev->dev_private;
80 enum amd_pm_state_type state;
69 81
70 mutex_lock(&adev->pm.mutex);
71 if (strncmp("battery", buf, strlen("battery")) == 0) 82 if (strncmp("battery", buf, strlen("battery")) == 0)
72 adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 83 state = POWER_STATE_TYPE_BATTERY;
73 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 84 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
74 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 85 state = POWER_STATE_TYPE_BALANCED;
75 else if (strncmp("performance", buf, strlen("performance")) == 0) 86 else if (strncmp("performance", buf, strlen("performance")) == 0)
76 adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; 87 state = POWER_STATE_TYPE_PERFORMANCE;
77 else { 88 else {
78 mutex_unlock(&adev->pm.mutex);
79 count = -EINVAL; 89 count = -EINVAL;
80 goto fail; 90 goto fail;
81 } 91 }
82 mutex_unlock(&adev->pm.mutex);
83 92
84 /* Can't set dpm state when the card is off */ 93 if (adev->pp_enabled) {
85 if (!(adev->flags & AMD_IS_PX) || 94 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
86 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 95 } else {
87 amdgpu_pm_compute_clocks(adev); 96 mutex_lock(&adev->pm.mutex);
97 adev->pm.dpm.user_state = state;
98 mutex_unlock(&adev->pm.mutex);
99
100 /* Can't set dpm state when the card is off */
101 if (!(adev->flags & AMD_IS_PX) ||
102 (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
103 amdgpu_pm_compute_clocks(adev);
104 }
88fail: 105fail:
89 return count; 106 return count;
90} 107}
91 108
92static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 109static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
93 struct device_attribute *attr, 110 struct device_attribute *attr,
94 char *buf) 111 char *buf)
95{ 112{
96 struct drm_device *ddev = dev_get_drvdata(dev); 113 struct drm_device *ddev = dev_get_drvdata(dev);
97 struct amdgpu_device *adev = ddev->dev_private; 114 struct amdgpu_device *adev = ddev->dev_private;
98 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
99 115
100 return snprintf(buf, PAGE_SIZE, "%s\n", 116 if (adev->pp_enabled) {
101 (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" : 117 enum amd_dpm_forced_level level;
102 (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 118
119 level = amdgpu_dpm_get_performance_level(adev);
120 return snprintf(buf, PAGE_SIZE, "%s\n",
121 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
122 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
123 } else {
124 enum amdgpu_dpm_forced_level level;
125
126 level = adev->pm.dpm.forced_level;
127 return snprintf(buf, PAGE_SIZE, "%s\n",
128 (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
129 (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
130 }
103} 131}
104 132
105static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, 133static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
@@ -112,7 +140,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
112 enum amdgpu_dpm_forced_level level; 140 enum amdgpu_dpm_forced_level level;
113 int ret = 0; 141 int ret = 0;
114 142
115 mutex_lock(&adev->pm.mutex);
116 if (strncmp("low", buf, strlen("low")) == 0) { 143 if (strncmp("low", buf, strlen("low")) == 0) {
117 level = AMDGPU_DPM_FORCED_LEVEL_LOW; 144 level = AMDGPU_DPM_FORCED_LEVEL_LOW;
118 } else if (strncmp("high", buf, strlen("high")) == 0) { 145 } else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -123,7 +150,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
123 count = -EINVAL; 150 count = -EINVAL;
124 goto fail; 151 goto fail;
125 } 152 }
126 if (adev->pm.funcs->force_performance_level) { 153
154 if (adev->pp_enabled)
155 amdgpu_dpm_force_performance_level(adev, level);
156 else {
157 mutex_lock(&adev->pm.mutex);
127 if (adev->pm.dpm.thermal_active) { 158 if (adev->pm.dpm.thermal_active) {
128 count = -EINVAL; 159 count = -EINVAL;
129 goto fail; 160 goto fail;
@@ -131,6 +162,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
131 ret = amdgpu_dpm_force_performance_level(adev, level); 162 ret = amdgpu_dpm_force_performance_level(adev, level);
132 if (ret) 163 if (ret)
133 count = -EINVAL; 164 count = -EINVAL;
165 else
166 adev->pm.dpm.forced_level = level;
167 mutex_unlock(&adev->pm.mutex);
134 } 168 }
135fail: 169fail:
136 mutex_unlock(&adev->pm.mutex); 170 mutex_unlock(&adev->pm.mutex);
@@ -150,10 +184,10 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
150 struct amdgpu_device *adev = dev_get_drvdata(dev); 184 struct amdgpu_device *adev = dev_get_drvdata(dev);
151 int temp; 185 int temp;
152 186
153 if (adev->pm.funcs->get_temperature) 187 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
154 temp = amdgpu_dpm_get_temperature(adev);
155 else
156 temp = 0; 188 temp = 0;
189 else
190 temp = amdgpu_dpm_get_temperature(adev);
157 191
158 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 192 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
159} 193}
@@ -181,8 +215,10 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
181 struct amdgpu_device *adev = dev_get_drvdata(dev); 215 struct amdgpu_device *adev = dev_get_drvdata(dev);
182 u32 pwm_mode = 0; 216 u32 pwm_mode = 0;
183 217
184 if (adev->pm.funcs->get_fan_control_mode) 218 if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
185 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 219 return -EINVAL;
220
221 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
186 222
187 /* never 0 (full-speed), fuse or smc-controlled always */ 223 /* never 0 (full-speed), fuse or smc-controlled always */
188 return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2); 224 return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
@@ -197,7 +233,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
197 int err; 233 int err;
198 int value; 234 int value;
199 235
200 if(!adev->pm.funcs->set_fan_control_mode) 236 if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
201 return -EINVAL; 237 return -EINVAL;
202 238
203 err = kstrtoint(buf, 10, &value); 239 err = kstrtoint(buf, 10, &value);
@@ -290,11 +326,11 @@ static struct attribute *hwmon_attributes[] = {
290static umode_t hwmon_attributes_visible(struct kobject *kobj, 326static umode_t hwmon_attributes_visible(struct kobject *kobj,
291 struct attribute *attr, int index) 327 struct attribute *attr, int index)
292{ 328{
293 struct device *dev = container_of(kobj, struct device, kobj); 329 struct device *dev = kobj_to_dev(kobj);
294 struct amdgpu_device *adev = dev_get_drvdata(dev); 330 struct amdgpu_device *adev = dev_get_drvdata(dev);
295 umode_t effective_mode = attr->mode; 331 umode_t effective_mode = attr->mode;
296 332
297 /* Skip attributes if DPM is not enabled */ 333 /* Skip limit attributes if DPM is not enabled */
298 if (!adev->pm.dpm_enabled && 334 if (!adev->pm.dpm_enabled &&
299 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 335 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
300 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 336 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
@@ -304,6 +340,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
304 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 340 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
305 return 0; 341 return 0;
306 342
343 if (adev->pp_enabled)
344 return effective_mode;
345
307 /* Skip fan attributes if fan is not present */ 346 /* Skip fan attributes if fan is not present */
308 if (adev->pm.no_fan && 347 if (adev->pm.no_fan &&
309 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 348 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@@ -351,7 +390,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
351 container_of(work, struct amdgpu_device, 390 container_of(work, struct amdgpu_device,
352 pm.dpm.thermal.work); 391 pm.dpm.thermal.work);
353 /* switch to the thermal state */ 392 /* switch to the thermal state */
354 enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 393 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
355 394
356 if (!adev->pm.dpm_enabled) 395 if (!adev->pm.dpm_enabled)
357 return; 396 return;
@@ -379,7 +418,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
379} 418}
380 419
381static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 420static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
382 enum amdgpu_pm_state_type dpm_state) 421 enum amd_pm_state_type dpm_state)
383{ 422{
384 int i; 423 int i;
385 struct amdgpu_ps *ps; 424 struct amdgpu_ps *ps;
@@ -516,7 +555,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
516{ 555{
517 int i; 556 int i;
518 struct amdgpu_ps *ps; 557 struct amdgpu_ps *ps;
519 enum amdgpu_pm_state_type dpm_state; 558 enum amd_pm_state_type dpm_state;
520 int ret; 559 int ret;
521 560
522 /* if dpm init failed */ 561 /* if dpm init failed */
@@ -635,49 +674,54 @@ done:
635 674
636void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 675void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
637{ 676{
638 if (adev->pm.funcs->powergate_uvd) { 677 if (adev->pp_enabled)
639 mutex_lock(&adev->pm.mutex);
640 /* enable/disable UVD */
641 amdgpu_dpm_powergate_uvd(adev, !enable); 678 amdgpu_dpm_powergate_uvd(adev, !enable);
642 mutex_unlock(&adev->pm.mutex); 679 else {
643 } else { 680 if (adev->pm.funcs->powergate_uvd) {
644 if (enable) {
645 mutex_lock(&adev->pm.mutex); 681 mutex_lock(&adev->pm.mutex);
646 adev->pm.dpm.uvd_active = true; 682 /* enable/disable UVD */
647 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 683 amdgpu_dpm_powergate_uvd(adev, !enable);
648 mutex_unlock(&adev->pm.mutex); 684 mutex_unlock(&adev->pm.mutex);
649 } else { 685 } else {
650 mutex_lock(&adev->pm.mutex); 686 if (enable) {
651 adev->pm.dpm.uvd_active = false; 687 mutex_lock(&adev->pm.mutex);
652 mutex_unlock(&adev->pm.mutex); 688 adev->pm.dpm.uvd_active = true;
689 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
690 mutex_unlock(&adev->pm.mutex);
691 } else {
692 mutex_lock(&adev->pm.mutex);
693 adev->pm.dpm.uvd_active = false;
694 mutex_unlock(&adev->pm.mutex);
695 }
696 amdgpu_pm_compute_clocks(adev);
653 } 697 }
654 698
655 amdgpu_pm_compute_clocks(adev);
656 } 699 }
657} 700}
658 701
659void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 702void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
660{ 703{
661 if (adev->pm.funcs->powergate_vce) { 704 if (adev->pp_enabled)
662 mutex_lock(&adev->pm.mutex);
663 /* enable/disable VCE */
664 amdgpu_dpm_powergate_vce(adev, !enable); 705 amdgpu_dpm_powergate_vce(adev, !enable);
665 706 else {
666 mutex_unlock(&adev->pm.mutex); 707 if (adev->pm.funcs->powergate_vce) {
667 } else {
668 if (enable) {
669 mutex_lock(&adev->pm.mutex); 708 mutex_lock(&adev->pm.mutex);
670 adev->pm.dpm.vce_active = true; 709 amdgpu_dpm_powergate_vce(adev, !enable);
671 /* XXX select vce level based on ring/task */
672 adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
673 mutex_unlock(&adev->pm.mutex); 710 mutex_unlock(&adev->pm.mutex);
674 } else { 711 } else {
675 mutex_lock(&adev->pm.mutex); 712 if (enable) {
676 adev->pm.dpm.vce_active = false; 713 mutex_lock(&adev->pm.mutex);
677 mutex_unlock(&adev->pm.mutex); 714 adev->pm.dpm.vce_active = true;
715 /* XXX select vce level based on ring/task */
716 adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
717 mutex_unlock(&adev->pm.mutex);
718 } else {
719 mutex_lock(&adev->pm.mutex);
720 adev->pm.dpm.vce_active = false;
721 mutex_unlock(&adev->pm.mutex);
722 }
723 amdgpu_pm_compute_clocks(adev);
678 } 724 }
679
680 amdgpu_pm_compute_clocks(adev);
681 } 725 }
682} 726}
683 727
@@ -685,10 +729,13 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
685{ 729{
686 int i; 730 int i;
687 731
688 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 732 if (adev->pp_enabled)
689 printk("== power state %d ==\n", i); 733 /* TO DO */
734 return;
735
736 for (i = 0; i < adev->pm.dpm.num_ps; i++)
690 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 737 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
691 } 738
692} 739}
693 740
694int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 741int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
@@ -698,8 +745,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
698 if (adev->pm.sysfs_initialized) 745 if (adev->pm.sysfs_initialized)
699 return 0; 746 return 0;
700 747
701 if (adev->pm.funcs->get_temperature == NULL) 748 if (!adev->pp_enabled) {
702 return 0; 749 if (adev->pm.funcs->get_temperature == NULL)
750 return 0;
751 }
752
703 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 753 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
704 DRIVER_NAME, adev, 754 DRIVER_NAME, adev,
705 hwmon_groups); 755 hwmon_groups);
@@ -748,32 +798,43 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
748 if (!adev->pm.dpm_enabled) 798 if (!adev->pm.dpm_enabled)
749 return; 799 return;
750 800
751 mutex_lock(&adev->pm.mutex); 801 if (adev->pp_enabled) {
802 int i = 0;
752 803
753 /* update active crtc counts */ 804 amdgpu_display_bandwidth_update(adev);
754 adev->pm.dpm.new_active_crtcs = 0; 805 mutex_lock(&adev->ring_lock);
755 adev->pm.dpm.new_active_crtc_count = 0; 806 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
756 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 807 struct amdgpu_ring *ring = adev->rings[i];
757 list_for_each_entry(crtc, 808 if (ring && ring->ready)
758 &ddev->mode_config.crtc_list, head) { 809 amdgpu_fence_wait_empty(ring);
759 amdgpu_crtc = to_amdgpu_crtc(crtc);
760 if (crtc->enabled) {
761 adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
762 adev->pm.dpm.new_active_crtc_count++;
763 } 810 }
764 } 811 mutex_unlock(&adev->ring_lock);
765 }
766 812
767 /* update battery/ac status */ 813 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
768 if (power_supply_is_system_supplied() > 0) 814 } else {
769 adev->pm.dpm.ac_power = true; 815 mutex_lock(&adev->pm.mutex);
770 else 816 adev->pm.dpm.new_active_crtcs = 0;
771 adev->pm.dpm.ac_power = false; 817 adev->pm.dpm.new_active_crtc_count = 0;
772 818 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
773 amdgpu_dpm_change_power_state_locked(adev); 819 list_for_each_entry(crtc,
820 &ddev->mode_config.crtc_list, head) {
821 amdgpu_crtc = to_amdgpu_crtc(crtc);
822 if (crtc->enabled) {
823 adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
824 adev->pm.dpm.new_active_crtc_count++;
825 }
826 }
827 }
828 /* update battery/ac status */
829 if (power_supply_is_system_supplied() > 0)
830 adev->pm.dpm.ac_power = true;
831 else
832 adev->pm.dpm.ac_power = false;
774 833
775 mutex_unlock(&adev->pm.mutex); 834 amdgpu_dpm_change_power_state_locked(adev);
776 835
836 mutex_unlock(&adev->pm.mutex);
837 }
777} 838}
778 839
779/* 840/*
@@ -787,7 +848,13 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
787 struct drm_device *dev = node->minor->dev; 848 struct drm_device *dev = node->minor->dev;
788 struct amdgpu_device *adev = dev->dev_private; 849 struct amdgpu_device *adev = dev->dev_private;
789 850
790 if (adev->pm.dpm_enabled) { 851 if (!adev->pm.dpm_enabled) {
852 seq_printf(m, "dpm not enabled\n");
853 return 0;
854 }
855 if (adev->pp_enabled) {
856 amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
857 } else {
791 mutex_lock(&adev->pm.mutex); 858 mutex_lock(&adev->pm.mutex);
792 if (adev->pm.funcs->debugfs_print_current_performance_level) 859 if (adev->pm.funcs->debugfs_print_current_performance_level)
793 amdgpu_dpm_debugfs_print_current_performance_level(adev, m); 860 amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
new file mode 100644
index 000000000000..5ee9a0690278
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "atom.h"
26#include "amdgpu.h"
27#include "amd_shared.h"
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include "amdgpu_pm.h"
31#include <drm/amdgpu_drm.h>
32#include "amdgpu_powerplay.h"
33#include "cik_dpm.h"
34#include "vi_dpm.h"
35
36static int amdgpu_powerplay_init(struct amdgpu_device *adev)
37{
38 int ret = 0;
39 struct amd_powerplay *amd_pp;
40
41 amd_pp = &(adev->powerplay);
42
43 if (adev->pp_enabled) {
44#ifdef CONFIG_DRM_AMD_POWERPLAY
45 struct amd_pp_init *pp_init;
46
47 pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);
48
49 if (pp_init == NULL)
50 return -ENOMEM;
51
52 pp_init->chip_family = adev->family;
53 pp_init->chip_id = adev->asic_type;
54 pp_init->device = amdgpu_cgs_create_device(adev);
55
56 ret = amd_powerplay_init(pp_init, amd_pp);
57 kfree(pp_init);
58#endif
59 } else {
60 amd_pp->pp_handle = (void *)adev;
61
62 switch (adev->asic_type) {
63#ifdef CONFIG_DRM_AMDGPU_CIK
64 case CHIP_BONAIRE:
65 case CHIP_HAWAII:
66 amd_pp->ip_funcs = &ci_dpm_ip_funcs;
67 break;
68 case CHIP_KABINI:
69 case CHIP_MULLINS:
70 case CHIP_KAVERI:
71 amd_pp->ip_funcs = &kv_dpm_ip_funcs;
72 break;
73#endif
74 case CHIP_TOPAZ:
75 amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
76 break;
77 case CHIP_TONGA:
78 amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
79 break;
80 case CHIP_FIJI:
81 amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
82 break;
83 case CHIP_CARRIZO:
84 case CHIP_STONEY:
85 amd_pp->ip_funcs = &cz_dpm_ip_funcs;
86 break;
87 default:
88 ret = -EINVAL;
89 break;
90 }
91 }
92 return ret;
93}
94
95static int amdgpu_pp_early_init(void *handle)
96{
97 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
98 int ret = 0;
99
100#ifdef CONFIG_DRM_AMD_POWERPLAY
101 switch (adev->asic_type) {
102 case CHIP_TONGA:
103 case CHIP_FIJI:
104 adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
105 break;
106 default:
107 adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
108 break;
109 }
110#else
111 adev->pp_enabled = false;
112#endif
113
114 ret = amdgpu_powerplay_init(adev);
115 if (ret)
116 return ret;
117
118 if (adev->powerplay.ip_funcs->early_init)
119 ret = adev->powerplay.ip_funcs->early_init(
120 adev->powerplay.pp_handle);
121 return ret;
122}
123
124
125static int amdgpu_pp_late_init(void *handle)
126{
127 int ret = 0;
128 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
129
130 if (adev->powerplay.ip_funcs->late_init)
131 ret = adev->powerplay.ip_funcs->late_init(
132 adev->powerplay.pp_handle);
133
134#ifdef CONFIG_DRM_AMD_POWERPLAY
135 if (adev->pp_enabled)
136 amdgpu_pm_sysfs_init(adev);
137#endif
138 return ret;
139}
140
141static int amdgpu_pp_sw_init(void *handle)
142{
143 int ret = 0;
144 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
145
146 if (adev->powerplay.ip_funcs->sw_init)
147 ret = adev->powerplay.ip_funcs->sw_init(
148 adev->powerplay.pp_handle);
149
150#ifdef CONFIG_DRM_AMD_POWERPLAY
151 if (adev->pp_enabled) {
152 if (amdgpu_dpm == 0)
153 adev->pm.dpm_enabled = false;
154 else
155 adev->pm.dpm_enabled = true;
156 }
157#endif
158
159 return ret;
160}
161
162static int amdgpu_pp_sw_fini(void *handle)
163{
164 int ret = 0;
165 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
166
167 if (adev->powerplay.ip_funcs->sw_fini)
168 ret = adev->powerplay.ip_funcs->sw_fini(
169 adev->powerplay.pp_handle);
170 if (ret)
171 return ret;
172
173#ifdef CONFIG_DRM_AMD_POWERPLAY
174 if (adev->pp_enabled) {
175 amdgpu_pm_sysfs_fini(adev);
176 amd_powerplay_fini(adev->powerplay.pp_handle);
177 }
178#endif
179
180 return ret;
181}
182
183static int amdgpu_pp_hw_init(void *handle)
184{
185 int ret = 0;
186 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
187
188 if (adev->pp_enabled && adev->firmware.smu_load)
189 amdgpu_ucode_init_bo(adev);
190
191 if (adev->powerplay.ip_funcs->hw_init)
192 ret = adev->powerplay.ip_funcs->hw_init(
193 adev->powerplay.pp_handle);
194
195 return ret;
196}
197
198static int amdgpu_pp_hw_fini(void *handle)
199{
200 int ret = 0;
201 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
202
203 if (adev->powerplay.ip_funcs->hw_fini)
204 ret = adev->powerplay.ip_funcs->hw_fini(
205 adev->powerplay.pp_handle);
206
207 if (adev->pp_enabled && adev->firmware.smu_load)
208 amdgpu_ucode_fini_bo(adev);
209
210 return ret;
211}
212
213static int amdgpu_pp_suspend(void *handle)
214{
215 int ret = 0;
216 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
217
218 if (adev->powerplay.ip_funcs->suspend)
219 ret = adev->powerplay.ip_funcs->suspend(
220 adev->powerplay.pp_handle);
221 return ret;
222}
223
224static int amdgpu_pp_resume(void *handle)
225{
226 int ret = 0;
227 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
228
229 if (adev->powerplay.ip_funcs->resume)
230 ret = adev->powerplay.ip_funcs->resume(
231 adev->powerplay.pp_handle);
232 return ret;
233}
234
235static int amdgpu_pp_set_clockgating_state(void *handle,
236 enum amd_clockgating_state state)
237{
238 int ret = 0;
239 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
240
241 if (adev->powerplay.ip_funcs->set_clockgating_state)
242 ret = adev->powerplay.ip_funcs->set_clockgating_state(
243 adev->powerplay.pp_handle, state);
244 return ret;
245}
246
247static int amdgpu_pp_set_powergating_state(void *handle,
248 enum amd_powergating_state state)
249{
250 int ret = 0;
251 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
252
253 if (adev->powerplay.ip_funcs->set_powergating_state)
254 ret = adev->powerplay.ip_funcs->set_powergating_state(
255 adev->powerplay.pp_handle, state);
256 return ret;
257}
258
259
260static bool amdgpu_pp_is_idle(void *handle)
261{
262 bool ret = true;
263 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
264
265 if (adev->powerplay.ip_funcs->is_idle)
266 ret = adev->powerplay.ip_funcs->is_idle(
267 adev->powerplay.pp_handle);
268 return ret;
269}
270
271static int amdgpu_pp_wait_for_idle(void *handle)
272{
273 int ret = 0;
274 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
275
276 if (adev->powerplay.ip_funcs->wait_for_idle)
277 ret = adev->powerplay.ip_funcs->wait_for_idle(
278 adev->powerplay.pp_handle);
279 return ret;
280}
281
282static int amdgpu_pp_soft_reset(void *handle)
283{
284 int ret = 0;
285 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
286
287 if (adev->powerplay.ip_funcs->soft_reset)
288 ret = adev->powerplay.ip_funcs->soft_reset(
289 adev->powerplay.pp_handle);
290 return ret;
291}
292
293static void amdgpu_pp_print_status(void *handle)
294{
295 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
296
297 if (adev->powerplay.ip_funcs->print_status)
298 adev->powerplay.ip_funcs->print_status(
299 adev->powerplay.pp_handle);
300}
301
302const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
303 .early_init = amdgpu_pp_early_init,
304 .late_init = amdgpu_pp_late_init,
305 .sw_init = amdgpu_pp_sw_init,
306 .sw_fini = amdgpu_pp_sw_fini,
307 .hw_init = amdgpu_pp_hw_init,
308 .hw_fini = amdgpu_pp_hw_fini,
309 .suspend = amdgpu_pp_suspend,
310 .resume = amdgpu_pp_resume,
311 .is_idle = amdgpu_pp_is_idle,
312 .wait_for_idle = amdgpu_pp_wait_for_idle,
313 .soft_reset = amdgpu_pp_soft_reset,
314 .print_status = amdgpu_pp_print_status,
315 .set_clockgating_state = amdgpu_pp_set_clockgating_state,
316 .set_powergating_state = amdgpu_pp_set_powergating_state,
317};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
new file mode 100644
index 000000000000..da5cf47cfd99
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __AMDGPU_POPWERPLAY_H__
27#define __AMDGPU_POPWERPLAY_H__
28
29#include "amd_shared.h"
30
31extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
32
33#endif /* __AMDSOC_DM_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index dd005c336c97..181ce39ef5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
293 fence = to_amdgpu_fence(sync->sync_to[i]); 293 fence = to_amdgpu_fence(sync->sync_to[i]);
294 294
295 /* check if we really need to sync */ 295 /* check if we really need to sync */
296 if (!amdgpu_fence_need_sync(fence, ring)) 296 if (!amdgpu_enable_scheduler &&
297 !amdgpu_fence_need_sync(fence, ring))
297 continue; 298 continue;
298 299
299 /* prevent GPU deadlocks */ 300 /* prevent GPU deadlocks */
@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
303 } 304 }
304 305
305 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { 306 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
306 r = fence_wait(&fence->base, true); 307 r = fence_wait(sync->sync_to[i], true);
307 if (r) 308 if (r)
308 return r; 309 return r;
309 continue; 310 continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b53d273eb7a1..aefc668e6b5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -75,50 +75,77 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
75} 75}
76 76
77/** 77/**
78 * amdgpu_vm_get_bos - add the vm BOs to a validation list 78 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
79 * 79 *
80 * @vm: vm providing the BOs 80 * @vm: vm providing the BOs
81 * @head: head of validation list 81 * @validated: head of validation list
82 * @entry: entry to add
82 * 83 *
83 * Add the page directory to the list of BOs to 84 * Add the page directory to the list of BOs to
84 * validate for command submission (cayman+). 85 * validate for command submission.
85 */ 86 */
86struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, 87void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
87 struct amdgpu_vm *vm, 88 struct list_head *validated,
88 struct list_head *head) 89 struct amdgpu_bo_list_entry *entry)
89{ 90{
90 struct amdgpu_bo_list_entry *list; 91 entry->robj = vm->page_directory;
91 unsigned i, idx; 92 entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
93 entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
94 entry->priority = 0;
95 entry->tv.bo = &vm->page_directory->tbo;
96 entry->tv.shared = true;
97 list_add(&entry->tv.head, validated);
98}
92 99
93 list = drm_malloc_ab(vm->max_pde_used + 2, 100/**
94 sizeof(struct amdgpu_bo_list_entry)); 101 * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
95 if (!list) { 102 *
96 return NULL; 103 * @vm: vm providing the BOs
97 } 104 * @duplicates: head of duplicates list
105 *
106 * Add the page directory to the BO duplicates list
107 * for command submission.
108 */
109void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
110{
111 unsigned i;
98 112
99 /* add the vm page table to the list */ 113 /* add the vm page table to the list */
100 list[0].robj = vm->page_directory; 114 for (i = 0; i <= vm->max_pde_used; ++i) {
101 list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; 115 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
102 list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; 116
103 list[0].priority = 0; 117 if (!entry->robj)
104 list[0].tv.bo = &vm->page_directory->tbo;
105 list[0].tv.shared = true;
106 list_add(&list[0].tv.head, head);
107
108 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
109 if (!vm->page_tables[i].bo)
110 continue; 118 continue;
111 119
112 list[idx].robj = vm->page_tables[i].bo; 120 list_add(&entry->tv.head, duplicates);
113 list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
114 list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
115 list[idx].priority = 0;
116 list[idx].tv.bo = &list[idx].robj->tbo;
117 list[idx].tv.shared = true;
118 list_add(&list[idx++].tv.head, head);
119 } 121 }
120 122
121 return list; 123}
124
125/**
126 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
127 *
128 * @adev: amdgpu device instance
129 * @vm: vm providing the BOs
130 *
131 * Move the PT BOs to the tail of the LRU.
132 */
133void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
134 struct amdgpu_vm *vm)
135{
136 struct ttm_bo_global *glob = adev->mman.bdev.glob;
137 unsigned i;
138
139 spin_lock(&glob->lru_lock);
140 for (i = 0; i <= vm->max_pde_used; ++i) {
141 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
142
143 if (!entry->robj)
144 continue;
145
146 ttm_bo_move_to_lru_tail(&entry->robj->tbo);
147 }
148 spin_unlock(&glob->lru_lock);
122} 149}
123 150
124/** 151/**
@@ -461,7 +488,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
461 488
462 /* walk over the address space and update the page directory */ 489 /* walk over the address space and update the page directory */
463 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { 490 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
464 struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; 491 struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
465 uint64_t pde, pt; 492 uint64_t pde, pt;
466 493
467 if (bo == NULL) 494 if (bo == NULL)
@@ -638,7 +665,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
638 /* walk over the address space and update the page tables */ 665 /* walk over the address space and update the page tables */
639 for (addr = start; addr < end; ) { 666 for (addr = start; addr < end; ) {
640 uint64_t pt_idx = addr >> amdgpu_vm_block_size; 667 uint64_t pt_idx = addr >> amdgpu_vm_block_size;
641 struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; 668 struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
642 unsigned nptes; 669 unsigned nptes;
643 uint64_t pte; 670 uint64_t pte;
644 int r; 671 int r;
@@ -1010,13 +1037,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1010 return -EINVAL; 1037 return -EINVAL;
1011 1038
1012 /* make sure object fit at this offset */ 1039 /* make sure object fit at this offset */
1013 eaddr = saddr + size; 1040 eaddr = saddr + size - 1;
1014 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) 1041 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1015 return -EINVAL; 1042 return -EINVAL;
1016 1043
1017 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; 1044 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1018 if (last_pfn > adev->vm_manager.max_pfn) { 1045 if (last_pfn >= adev->vm_manager.max_pfn) {
1019 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", 1046 dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1020 last_pfn, adev->vm_manager.max_pfn); 1047 last_pfn, adev->vm_manager.max_pfn);
1021 return -EINVAL; 1048 return -EINVAL;
1022 } 1049 }
@@ -1025,7 +1052,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1025 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1052 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1026 1053
1027 spin_lock(&vm->it_lock); 1054 spin_lock(&vm->it_lock);
1028 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); 1055 it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1029 spin_unlock(&vm->it_lock); 1056 spin_unlock(&vm->it_lock);
1030 if (it) { 1057 if (it) {
1031 struct amdgpu_bo_va_mapping *tmp; 1058 struct amdgpu_bo_va_mapping *tmp;
@@ -1046,7 +1073,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1046 1073
1047 INIT_LIST_HEAD(&mapping->list); 1074 INIT_LIST_HEAD(&mapping->list);
1048 mapping->it.start = saddr; 1075 mapping->it.start = saddr;
1049 mapping->it.last = eaddr - 1; 1076 mapping->it.last = eaddr;
1050 mapping->offset = offset; 1077 mapping->offset = offset;
1051 mapping->flags = flags; 1078 mapping->flags = flags;
1052 1079
@@ -1070,9 +1097,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1070 /* walk over the address space and allocate the page tables */ 1097 /* walk over the address space and allocate the page tables */
1071 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1098 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1072 struct reservation_object *resv = vm->page_directory->tbo.resv; 1099 struct reservation_object *resv = vm->page_directory->tbo.resv;
1100 struct amdgpu_bo_list_entry *entry;
1073 struct amdgpu_bo *pt; 1101 struct amdgpu_bo *pt;
1074 1102
1075 if (vm->page_tables[pt_idx].bo) 1103 entry = &vm->page_tables[pt_idx].entry;
1104 if (entry->robj)
1076 continue; 1105 continue;
1077 1106
1078 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1107 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
@@ -1094,8 +1123,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1094 goto error_free; 1123 goto error_free;
1095 } 1124 }
1096 1125
1126 entry->robj = pt;
1127 entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
1128 entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
1129 entry->priority = 0;
1130 entry->tv.bo = &entry->robj->tbo;
1131 entry->tv.shared = true;
1097 vm->page_tables[pt_idx].addr = 0; 1132 vm->page_tables[pt_idx].addr = 0;
1098 vm->page_tables[pt_idx].bo = pt;
1099 } 1133 }
1100 1134
1101 return 0; 1135 return 0;
@@ -1326,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1326 } 1360 }
1327 1361
1328 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) 1362 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
1329 amdgpu_bo_unref(&vm->page_tables[i].bo); 1363 amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1330 kfree(vm->page_tables); 1364 kfree(vm->page_tables);
1331 1365
1332 amdgpu_bo_unref(&vm->page_directory); 1366 amdgpu_bo_unref(&vm->page_directory);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 92b6acadfc52..21aacc1f45c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
243 243
244/* convert bits per color to bits per pixel */ 244/* convert bits per color to bits per pixel */
245/* get bpc from the EDID */ 245/* get bpc from the EDID */
246static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc) 246static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
247{ 247{
248 if (bpc == 0) 248 if (bpc == 0)
249 return 24; 249 return 24;
@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
251 return bpc * 3; 251 return bpc * 3;
252} 252}
253 253
254/* get the max pix clock supported by the link rate and lane num */
255static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
256 int lane_num,
257 int bpp)
258{
259 return (link_rate * lane_num * 8) / bpp;
260}
261
262/***** amdgpu specific DP functions *****/ 254/***** amdgpu specific DP functions *****/
263 255
264/* First get the min lane# when low rate is used according to pixel clock 256static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
265 * (prefer low rate), second check max lane# supported by DP panel,
266 * if the max lane# < low rate lane# then use max lane# instead.
267 */
268static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
269 const u8 dpcd[DP_DPCD_SIZE], 257 const u8 dpcd[DP_DPCD_SIZE],
270 int pix_clock) 258 unsigned pix_clock,
271{ 259 unsigned *dp_lanes, unsigned *dp_rate)
272 int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
273 int max_link_rate = drm_dp_max_link_rate(dpcd);
274 int max_lane_num = drm_dp_max_lane_count(dpcd);
275 int lane_num;
276 int max_dp_pix_clock;
277
278 for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
279 max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
280 if (pix_clock <= max_dp_pix_clock)
281 break;
282 }
283
284 return lane_num;
285}
286
287static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
288 const u8 dpcd[DP_DPCD_SIZE],
289 int pix_clock)
290{ 260{
291 int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector)); 261 unsigned bpp =
292 int lane_num, max_pix_clock; 262 amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
293 263 static const unsigned link_rates[3] = { 162000, 270000, 540000 };
294 if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) == 264 unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
295 ENCODER_OBJECT_ID_NUTMEG) 265 unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
296 return 270000; 266 unsigned lane_num, i, max_pix_clock;
297 267
298 lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock); 268 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
299 max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp); 269 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
300 if (pix_clock <= max_pix_clock) 270 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
301 return 162000; 271 if (max_pix_clock >= pix_clock) {
302 max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp); 272 *dp_lanes = lane_num;
303 if (pix_clock <= max_pix_clock) 273 *dp_rate = link_rates[i];
304 return 270000; 274 return 0;
305 if (amdgpu_connector_is_dp12_capable(connector)) { 275 }
306 max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp); 276 }
307 if (pix_clock <= max_pix_clock)
308 return 540000;
309 } 277 }
310 278
311 return drm_dp_max_link_rate(dpcd); 279 return -EINVAL;
312} 280}
313 281
314static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev, 282static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
422{ 390{
423 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 391 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
424 struct amdgpu_connector_atom_dig *dig_connector; 392 struct amdgpu_connector_atom_dig *dig_connector;
393 int ret;
425 394
426 if (!amdgpu_connector->con_priv) 395 if (!amdgpu_connector->con_priv)
427 return; 396 return;
@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
429 398
430 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 399 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
431 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { 400 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
432 dig_connector->dp_clock = 401 ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
433 amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); 402 mode->clock,
434 dig_connector->dp_lane_count = 403 &dig_connector->dp_lane_count,
435 amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); 404 &dig_connector->dp_clock);
405 if (ret) {
406 dig_connector->dp_clock = 0;
407 dig_connector->dp_lane_count = 0;
408 }
436 } 409 }
437} 410}
438 411
@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
441{ 414{
442 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 415 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
443 struct amdgpu_connector_atom_dig *dig_connector; 416 struct amdgpu_connector_atom_dig *dig_connector;
444 int dp_clock; 417 unsigned dp_lanes, dp_clock;
418 int ret;
445 419
446 if (!amdgpu_connector->con_priv) 420 if (!amdgpu_connector->con_priv)
447 return MODE_CLOCK_HIGH; 421 return MODE_CLOCK_HIGH;
448 dig_connector = amdgpu_connector->con_priv; 422 dig_connector = amdgpu_connector->con_priv;
449 423
450 dp_clock = 424 ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
451 amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); 425 mode->clock, &dp_lanes, &dp_clock);
426 if (ret)
427 return MODE_CLOCK_HIGH;
452 428
453 if ((dp_clock == 540000) && 429 if ((dp_clock == 540000) &&
454 (!amdgpu_connector_is_dp12_capable(connector))) 430 (!amdgpu_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 57a2e347f04d..8b4731d4e10e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -1395,7 +1395,6 @@ static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1395 ci_fan_ctrl_set_default_mode(adev); 1395 ci_fan_ctrl_set_default_mode(adev);
1396} 1396}
1397 1397
1398#if 0
1399static int ci_read_smc_soft_register(struct amdgpu_device *adev, 1398static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1400 u16 reg_offset, u32 *value) 1399 u16 reg_offset, u32 *value)
1401{ 1400{
@@ -1405,7 +1404,6 @@ static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1405 pi->soft_regs_start + reg_offset, 1404 pi->soft_regs_start + reg_offset,
1406 value, pi->sram_end); 1405 value, pi->sram_end);
1407} 1406}
1408#endif
1409 1407
1410static int ci_write_smc_soft_register(struct amdgpu_device *adev, 1408static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1411 u16 reg_offset, u32 value) 1409 u16 reg_offset, u32 value)
@@ -6084,11 +6082,23 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6084 struct amdgpu_ps *rps = &pi->current_rps; 6082 struct amdgpu_ps *rps = &pi->current_rps;
6085 u32 sclk = ci_get_average_sclk_freq(adev); 6083 u32 sclk = ci_get_average_sclk_freq(adev);
6086 u32 mclk = ci_get_average_mclk_freq(adev); 6084 u32 mclk = ci_get_average_mclk_freq(adev);
6085 u32 activity_percent = 50;
6086 int ret;
6087
6088 ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6089 &activity_percent);
6090
6091 if (ret == 0) {
6092 activity_percent += 0x80;
6093 activity_percent >>= 8;
6094 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6095 }
6087 6096
6088 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); 6097 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
6089 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis"); 6098 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6090 seq_printf(m, "power level avg sclk: %u mclk: %u\n", 6099 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
6091 sclk, mclk); 6100 sclk, mclk);
6101 seq_printf(m, "GPU load: %u %%\n", activity_percent);
6092} 6102}
6093 6103
6094static void ci_dpm_print_power_state(struct amdgpu_device *adev, 6104static void ci_dpm_print_power_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 484710cfdf82..fd9c9588ef46 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -32,6 +32,7 @@
32#include "amdgpu_vce.h" 32#include "amdgpu_vce.h"
33#include "cikd.h" 33#include "cikd.h"
34#include "atom.h" 34#include "atom.h"
35#include "amd_pcie.h"
35 36
36#include "cik.h" 37#include "cik.h"
37#include "gmc_v7_0.h" 38#include "gmc_v7_0.h"
@@ -65,6 +66,7 @@
65#include "oss/oss_2_0_sh_mask.h" 66#include "oss/oss_2_0_sh_mask.h"
66 67
67#include "amdgpu_amdkfd.h" 68#include "amdgpu_amdkfd.h"
69#include "amdgpu_powerplay.h"
68 70
69/* 71/*
70 * Indirect registers accessor 72 * Indirect registers accessor
@@ -929,6 +931,37 @@ static bool cik_read_disabled_bios(struct amdgpu_device *adev)
929 return r; 931 return r;
930} 932}
931 933
934static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
935 u8 *bios, u32 length_bytes)
936{
937 u32 *dw_ptr;
938 unsigned long flags;
939 u32 i, length_dw;
940
941 if (bios == NULL)
942 return false;
943 if (length_bytes == 0)
944 return false;
945 /* APU vbios image is part of sbios image */
946 if (adev->flags & AMD_IS_APU)
947 return false;
948
949 dw_ptr = (u32 *)bios;
950 length_dw = ALIGN(length_bytes, 4) / 4;
951 /* take the smc lock since we are using the smc index */
952 spin_lock_irqsave(&adev->smc_idx_lock, flags);
953 /* set rom index to 0 */
954 WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
955 WREG32(mmSMC_IND_DATA_0, 0);
956 /* set index to data for continous read */
957 WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
958 for (i = 0; i < length_dw; i++)
959 dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
960 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
961
962 return true;
963}
964
932static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { 965static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
933 {mmGRBM_STATUS, false}, 966 {mmGRBM_STATUS, false},
934 {mmGB_ADDR_CONFIG, false}, 967 {mmGB_ADDR_CONFIG, false},
@@ -1563,8 +1596,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
1563{ 1596{
1564 struct pci_dev *root = adev->pdev->bus->self; 1597 struct pci_dev *root = adev->pdev->bus->self;
1565 int bridge_pos, gpu_pos; 1598 int bridge_pos, gpu_pos;
1566 u32 speed_cntl, mask, current_data_rate; 1599 u32 speed_cntl, current_data_rate;
1567 int ret, i; 1600 int i;
1568 u16 tmp16; 1601 u16 tmp16;
1569 1602
1570 if (pci_is_root_bus(adev->pdev->bus)) 1603 if (pci_is_root_bus(adev->pdev->bus))
@@ -1576,23 +1609,20 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
1576 if (adev->flags & AMD_IS_APU) 1609 if (adev->flags & AMD_IS_APU)
1577 return; 1610 return;
1578 1611
1579 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1612 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1580 if (ret != 0) 1613 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
1581 return;
1582
1583 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
1584 return; 1614 return;
1585 1615
1586 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); 1616 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
1587 current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> 1617 current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
1588 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 1618 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
1589 if (mask & DRM_PCIE_SPEED_80) { 1619 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
1590 if (current_data_rate == 2) { 1620 if (current_data_rate == 2) {
1591 DRM_INFO("PCIE gen 3 link speeds already enabled\n"); 1621 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
1592 return; 1622 return;
1593 } 1623 }
1594 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); 1624 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
1595 } else if (mask & DRM_PCIE_SPEED_50) { 1625 } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
1596 if (current_data_rate == 1) { 1626 if (current_data_rate == 1) {
1597 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 1627 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
1598 return; 1628 return;
@@ -1608,7 +1638,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
1608 if (!gpu_pos) 1638 if (!gpu_pos)
1609 return; 1639 return;
1610 1640
1611 if (mask & DRM_PCIE_SPEED_80) { 1641 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
1612 /* re-try equalization if gen3 is not already enabled */ 1642 /* re-try equalization if gen3 is not already enabled */
1613 if (current_data_rate != 2) { 1643 if (current_data_rate != 2) {
1614 u16 bridge_cfg, gpu_cfg; 1644 u16 bridge_cfg, gpu_cfg;
@@ -1703,9 +1733,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
1703 1733
1704 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); 1734 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
1705 tmp16 &= ~0xf; 1735 tmp16 &= ~0xf;
1706 if (mask & DRM_PCIE_SPEED_80) 1736 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1707 tmp16 |= 3; /* gen3 */ 1737 tmp16 |= 3; /* gen3 */
1708 else if (mask & DRM_PCIE_SPEED_50) 1738 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1709 tmp16 |= 2; /* gen2 */ 1739 tmp16 |= 2; /* gen2 */
1710 else 1740 else
1711 tmp16 |= 1; /* gen1 */ 1741 tmp16 |= 1; /* gen1 */
@@ -1922,7 +1952,7 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
1922 .major = 7, 1952 .major = 7,
1923 .minor = 0, 1953 .minor = 0,
1924 .rev = 0, 1954 .rev = 0,
1925 .funcs = &ci_dpm_ip_funcs, 1955 .funcs = &amdgpu_pp_ip_funcs,
1926 }, 1956 },
1927 { 1957 {
1928 .type = AMD_IP_BLOCK_TYPE_DCE, 1958 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -1990,7 +2020,7 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
1990 .major = 7, 2020 .major = 7,
1991 .minor = 0, 2021 .minor = 0,
1992 .rev = 0, 2022 .rev = 0,
1993 .funcs = &ci_dpm_ip_funcs, 2023 .funcs = &amdgpu_pp_ip_funcs,
1994 }, 2024 },
1995 { 2025 {
1996 .type = AMD_IP_BLOCK_TYPE_DCE, 2026 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -2058,7 +2088,7 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
2058 .major = 7, 2088 .major = 7,
2059 .minor = 0, 2089 .minor = 0,
2060 .rev = 0, 2090 .rev = 0,
2061 .funcs = &kv_dpm_ip_funcs, 2091 .funcs = &amdgpu_pp_ip_funcs,
2062 }, 2092 },
2063 { 2093 {
2064 .type = AMD_IP_BLOCK_TYPE_DCE, 2094 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -2126,7 +2156,7 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
2126 .major = 7, 2156 .major = 7,
2127 .minor = 0, 2157 .minor = 0,
2128 .rev = 0, 2158 .rev = 0,
2129 .funcs = &kv_dpm_ip_funcs, 2159 .funcs = &amdgpu_pp_ip_funcs,
2130 }, 2160 },
2131 { 2161 {
2132 .type = AMD_IP_BLOCK_TYPE_DCE, 2162 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -2194,7 +2224,7 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
2194 .major = 7, 2224 .major = 7,
2195 .minor = 0, 2225 .minor = 0,
2196 .rev = 0, 2226 .rev = 0,
2197 .funcs = &kv_dpm_ip_funcs, 2227 .funcs = &amdgpu_pp_ip_funcs,
2198 }, 2228 },
2199 { 2229 {
2200 .type = AMD_IP_BLOCK_TYPE_DCE, 2230 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -2267,6 +2297,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2267static const struct amdgpu_asic_funcs cik_asic_funcs = 2297static const struct amdgpu_asic_funcs cik_asic_funcs =
2268{ 2298{
2269 .read_disabled_bios = &cik_read_disabled_bios, 2299 .read_disabled_bios = &cik_read_disabled_bios,
2300 .read_bios_from_rom = &cik_read_bios_from_rom,
2270 .read_register = &cik_read_register, 2301 .read_register = &cik_read_register,
2271 .reset = &cik_asic_reset, 2302 .reset = &cik_asic_reset,
2272 .set_vga_state = &cik_vga_set_state, 2303 .set_vga_state = &cik_vga_set_state,
@@ -2417,6 +2448,8 @@ static int cik_common_early_init(void *handle)
2417 return -EINVAL; 2448 return -EINVAL;
2418 } 2449 }
2419 2450
2451 amdgpu_get_pcie_info(adev);
2452
2420 return 0; 2453 return 0;
2421} 2454}
2422 2455
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 8993c50cb89f..30c9b3beeef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -274,6 +274,11 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev)
274static int cik_ih_early_init(void *handle) 274static int cik_ih_early_init(void *handle)
275{ 275{
276 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 276 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
277 int ret;
278
279 ret = amdgpu_irq_add_domain(adev);
280 if (ret)
281 return ret;
277 282
278 cik_ih_set_interrupt_funcs(adev); 283 cik_ih_set_interrupt_funcs(adev);
279 284
@@ -300,6 +305,7 @@ static int cik_ih_sw_fini(void *handle)
300 305
301 amdgpu_irq_fini(adev); 306 amdgpu_irq_fini(adev);
302 amdgpu_ih_ring_fini(adev); 307 amdgpu_ih_ring_fini(adev);
308 amdgpu_irq_remove_domain(adev);
303 309
304 return 0; 310 return 0;
305} 311}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 8035d4d6a4f5..4dd17f2dd905 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1078,6 +1078,37 @@ static uint32_t cz_get_eclk_level(struct amdgpu_device *adev,
1078 return i; 1078 return i;
1079} 1079}
1080 1080
1081static uint32_t cz_get_uvd_level(struct amdgpu_device *adev,
1082 uint32_t clock, uint16_t msg)
1083{
1084 int i = 0;
1085 struct amdgpu_uvd_clock_voltage_dependency_table *table =
1086 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1087
1088 switch (msg) {
1089 case PPSMC_MSG_SetUvdSoftMin:
1090 case PPSMC_MSG_SetUvdHardMin:
1091 for (i = 0; i < table->count; i++)
1092 if (clock <= table->entries[i].vclk)
1093 break;
1094 if (i == table->count)
1095 i = table->count - 1;
1096 break;
1097 case PPSMC_MSG_SetUvdSoftMax:
1098 case PPSMC_MSG_SetUvdHardMax:
1099 for (i = table->count - 1; i >= 0; i--)
1100 if (clock >= table->entries[i].vclk)
1101 break;
1102 if (i < 0)
1103 i = 0;
1104 break;
1105 default:
1106 break;
1107 }
1108
1109 return i;
1110}
1111
1081static int cz_program_bootup_state(struct amdgpu_device *adev) 1112static int cz_program_bootup_state(struct amdgpu_device *adev)
1082{ 1113{
1083 struct cz_power_info *pi = cz_get_pi(adev); 1114 struct cz_power_info *pi = cz_get_pi(adev);
@@ -1739,6 +1770,200 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
1739 return 0; 1770 return 0;
1740} 1771}
1741 1772
1773static int cz_dpm_uvd_force_highest(struct amdgpu_device *adev)
1774{
1775 struct cz_power_info *pi = cz_get_pi(adev);
1776 int ret = 0;
1777
1778 if (pi->uvd_dpm.soft_min_clk != pi->uvd_dpm.soft_max_clk) {
1779 pi->uvd_dpm.soft_min_clk =
1780 pi->uvd_dpm.soft_max_clk;
1781 ret = cz_send_msg_to_smc_with_parameter(adev,
1782 PPSMC_MSG_SetUvdSoftMin,
1783 cz_get_uvd_level(adev,
1784 pi->uvd_dpm.soft_min_clk,
1785 PPSMC_MSG_SetUvdSoftMin));
1786 if (ret)
1787 return ret;
1788 }
1789
1790 return ret;
1791}
1792
1793static int cz_dpm_uvd_force_lowest(struct amdgpu_device *adev)
1794{
1795 struct cz_power_info *pi = cz_get_pi(adev);
1796 int ret = 0;
1797
1798 if (pi->uvd_dpm.soft_max_clk != pi->uvd_dpm.soft_min_clk) {
1799 pi->uvd_dpm.soft_max_clk = pi->uvd_dpm.soft_min_clk;
1800 ret = cz_send_msg_to_smc_with_parameter(adev,
1801 PPSMC_MSG_SetUvdSoftMax,
1802 cz_get_uvd_level(adev,
1803 pi->uvd_dpm.soft_max_clk,
1804 PPSMC_MSG_SetUvdSoftMax));
1805 if (ret)
1806 return ret;
1807 }
1808
1809 return ret;
1810}
1811
1812static uint32_t cz_dpm_get_max_uvd_level(struct amdgpu_device *adev)
1813{
1814 struct cz_power_info *pi = cz_get_pi(adev);
1815
1816 if (!pi->max_uvd_level) {
1817 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
1818 pi->max_uvd_level = cz_get_argument(adev) + 1;
1819 }
1820
1821 if (pi->max_uvd_level > CZ_MAX_HARDWARE_POWERLEVELS) {
1822 DRM_ERROR("Invalid max uvd level!\n");
1823 return -EINVAL;
1824 }
1825
1826 return pi->max_uvd_level;
1827}
1828
1829static int cz_dpm_unforce_uvd_dpm_levels(struct amdgpu_device *adev)
1830{
1831 struct cz_power_info *pi = cz_get_pi(adev);
1832 struct amdgpu_uvd_clock_voltage_dependency_table *dep_table =
1833 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1834 uint32_t level = 0;
1835 int ret = 0;
1836
1837 pi->uvd_dpm.soft_min_clk = dep_table->entries[0].vclk;
1838 level = cz_dpm_get_max_uvd_level(adev) - 1;
1839 if (level < dep_table->count)
1840 pi->uvd_dpm.soft_max_clk = dep_table->entries[level].vclk;
1841 else
1842 pi->uvd_dpm.soft_max_clk =
1843 dep_table->entries[dep_table->count - 1].vclk;
1844
1845 /* get min/max sclk soft value
1846 * notify SMU to execute */
1847 ret = cz_send_msg_to_smc_with_parameter(adev,
1848 PPSMC_MSG_SetUvdSoftMin,
1849 cz_get_uvd_level(adev,
1850 pi->uvd_dpm.soft_min_clk,
1851 PPSMC_MSG_SetUvdSoftMin));
1852 if (ret)
1853 return ret;
1854
1855 ret = cz_send_msg_to_smc_with_parameter(adev,
1856 PPSMC_MSG_SetUvdSoftMax,
1857 cz_get_uvd_level(adev,
1858 pi->uvd_dpm.soft_max_clk,
1859 PPSMC_MSG_SetUvdSoftMax));
1860 if (ret)
1861 return ret;
1862
1863 DRM_DEBUG("DPM uvd unforce state min=%d, max=%d.\n",
1864 pi->uvd_dpm.soft_min_clk,
1865 pi->uvd_dpm.soft_max_clk);
1866
1867 return 0;
1868}
1869
1870static int cz_dpm_vce_force_highest(struct amdgpu_device *adev)
1871{
1872 struct cz_power_info *pi = cz_get_pi(adev);
1873 int ret = 0;
1874
1875 if (pi->vce_dpm.soft_min_clk != pi->vce_dpm.soft_max_clk) {
1876 pi->vce_dpm.soft_min_clk =
1877 pi->vce_dpm.soft_max_clk;
1878 ret = cz_send_msg_to_smc_with_parameter(adev,
1879 PPSMC_MSG_SetEclkSoftMin,
1880 cz_get_eclk_level(adev,
1881 pi->vce_dpm.soft_min_clk,
1882 PPSMC_MSG_SetEclkSoftMin));
1883 if (ret)
1884 return ret;
1885 }
1886
1887 return ret;
1888}
1889
1890static int cz_dpm_vce_force_lowest(struct amdgpu_device *adev)
1891{
1892 struct cz_power_info *pi = cz_get_pi(adev);
1893 int ret = 0;
1894
1895 if (pi->vce_dpm.soft_max_clk != pi->vce_dpm.soft_min_clk) {
1896 pi->vce_dpm.soft_max_clk = pi->vce_dpm.soft_min_clk;
1897 ret = cz_send_msg_to_smc_with_parameter(adev,
1898 PPSMC_MSG_SetEclkSoftMax,
1899 cz_get_uvd_level(adev,
1900 pi->vce_dpm.soft_max_clk,
1901 PPSMC_MSG_SetEclkSoftMax));
1902 if (ret)
1903 return ret;
1904 }
1905
1906 return ret;
1907}
1908
1909static uint32_t cz_dpm_get_max_vce_level(struct amdgpu_device *adev)
1910{
1911 struct cz_power_info *pi = cz_get_pi(adev);
1912
1913 if (!pi->max_vce_level) {
1914 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
1915 pi->max_vce_level = cz_get_argument(adev) + 1;
1916 }
1917
1918 if (pi->max_vce_level > CZ_MAX_HARDWARE_POWERLEVELS) {
1919 DRM_ERROR("Invalid max vce level!\n");
1920 return -EINVAL;
1921 }
1922
1923 return pi->max_vce_level;
1924}
1925
1926static int cz_dpm_unforce_vce_dpm_levels(struct amdgpu_device *adev)
1927{
1928 struct cz_power_info *pi = cz_get_pi(adev);
1929 struct amdgpu_vce_clock_voltage_dependency_table *dep_table =
1930 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1931 uint32_t level = 0;
1932 int ret = 0;
1933
1934 pi->vce_dpm.soft_min_clk = dep_table->entries[0].ecclk;
1935 level = cz_dpm_get_max_vce_level(adev) - 1;
1936 if (level < dep_table->count)
1937 pi->vce_dpm.soft_max_clk = dep_table->entries[level].ecclk;
1938 else
1939 pi->vce_dpm.soft_max_clk =
1940 dep_table->entries[dep_table->count - 1].ecclk;
1941
1942 /* get min/max sclk soft value
1943 * notify SMU to execute */
1944 ret = cz_send_msg_to_smc_with_parameter(adev,
1945 PPSMC_MSG_SetEclkSoftMin,
1946 cz_get_eclk_level(adev,
1947 pi->vce_dpm.soft_min_clk,
1948 PPSMC_MSG_SetEclkSoftMin));
1949 if (ret)
1950 return ret;
1951
1952 ret = cz_send_msg_to_smc_with_parameter(adev,
1953 PPSMC_MSG_SetEclkSoftMax,
1954 cz_get_eclk_level(adev,
1955 pi->vce_dpm.soft_max_clk,
1956 PPSMC_MSG_SetEclkSoftMax));
1957 if (ret)
1958 return ret;
1959
1960 DRM_DEBUG("DPM vce unforce state min=%d, max=%d.\n",
1961 pi->vce_dpm.soft_min_clk,
1962 pi->vce_dpm.soft_max_clk);
1963
1964 return 0;
1965}
1966
1742static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, 1967static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1743 enum amdgpu_dpm_forced_level level) 1968 enum amdgpu_dpm_forced_level level)
1744{ 1969{
@@ -1746,25 +1971,70 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1746 1971
1747 switch (level) { 1972 switch (level) {
1748 case AMDGPU_DPM_FORCED_LEVEL_HIGH: 1973 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1974 /* sclk */
1749 ret = cz_dpm_unforce_dpm_levels(adev); 1975 ret = cz_dpm_unforce_dpm_levels(adev);
1750 if (ret) 1976 if (ret)
1751 return ret; 1977 return ret;
1752 ret = cz_dpm_force_highest(adev); 1978 ret = cz_dpm_force_highest(adev);
1753 if (ret) 1979 if (ret)
1754 return ret; 1980 return ret;
1981
1982 /* uvd */
1983 ret = cz_dpm_unforce_uvd_dpm_levels(adev);
1984 if (ret)
1985 return ret;
1986 ret = cz_dpm_uvd_force_highest(adev);
1987 if (ret)
1988 return ret;
1989
1990 /* vce */
1991 ret = cz_dpm_unforce_vce_dpm_levels(adev);
1992 if (ret)
1993 return ret;
1994 ret = cz_dpm_vce_force_highest(adev);
1995 if (ret)
1996 return ret;
1755 break; 1997 break;
1756 case AMDGPU_DPM_FORCED_LEVEL_LOW: 1998 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1999 /* sclk */
1757 ret = cz_dpm_unforce_dpm_levels(adev); 2000 ret = cz_dpm_unforce_dpm_levels(adev);
1758 if (ret) 2001 if (ret)
1759 return ret; 2002 return ret;
1760 ret = cz_dpm_force_lowest(adev); 2003 ret = cz_dpm_force_lowest(adev);
1761 if (ret) 2004 if (ret)
1762 return ret; 2005 return ret;
2006
2007 /* uvd */
2008 ret = cz_dpm_unforce_uvd_dpm_levels(adev);
2009 if (ret)
2010 return ret;
2011 ret = cz_dpm_uvd_force_lowest(adev);
2012 if (ret)
2013 return ret;
2014
2015 /* vce */
2016 ret = cz_dpm_unforce_vce_dpm_levels(adev);
2017 if (ret)
2018 return ret;
2019 ret = cz_dpm_vce_force_lowest(adev);
2020 if (ret)
2021 return ret;
1763 break; 2022 break;
1764 case AMDGPU_DPM_FORCED_LEVEL_AUTO: 2023 case AMDGPU_DPM_FORCED_LEVEL_AUTO:
2024 /* sclk */
1765 ret = cz_dpm_unforce_dpm_levels(adev); 2025 ret = cz_dpm_unforce_dpm_levels(adev);
1766 if (ret) 2026 if (ret)
1767 return ret; 2027 return ret;
2028
2029 /* uvd */
2030 ret = cz_dpm_unforce_uvd_dpm_levels(adev);
2031 if (ret)
2032 return ret;
2033
2034 /* vce */
2035 ret = cz_dpm_unforce_vce_dpm_levels(adev);
2036 if (ret)
2037 return ret;
1768 break; 2038 break;
1769 default: 2039 default:
1770 break; 2040 break;
@@ -1905,7 +2175,8 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev)
1905 pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk; 2175 pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
1906 2176
1907 } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */ 2177 } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
1908 pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; 2178 /* leave it as set by user */
2179 /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
1909 } 2180 }
1910 2181
1911 cz_send_msg_to_smc_with_parameter(adev, 2182 cz_send_msg_to_smc_with_parameter(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
index 99e1afc89629..5df8c1faab51 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
@@ -183,6 +183,8 @@ struct cz_power_info {
183 uint32_t voltage_drop_threshold; 183 uint32_t voltage_drop_threshold;
184 uint32_t gfx_pg_threshold; 184 uint32_t gfx_pg_threshold;
185 uint32_t max_sclk_level; 185 uint32_t max_sclk_level;
186 uint32_t max_uvd_level;
187 uint32_t max_vce_level;
186 /* flags */ 188 /* flags */
187 bool didt_enabled; 189 bool didt_enabled;
188 bool video_start; 190 bool video_start;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index bc751bfbcae2..c79638f8e732 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -253,8 +253,14 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev)
253static int cz_ih_early_init(void *handle) 253static int cz_ih_early_init(void *handle)
254{ 254{
255 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 255 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
256 int ret;
257
258 ret = amdgpu_irq_add_domain(adev);
259 if (ret)
260 return ret;
256 261
257 cz_ih_set_interrupt_funcs(adev); 262 cz_ih_set_interrupt_funcs(adev);
263
258 return 0; 264 return 0;
259} 265}
260 266
@@ -278,6 +284,7 @@ static int cz_ih_sw_fini(void *handle)
278 284
279 amdgpu_irq_fini(adev); 285 amdgpu_irq_fini(adev);
280 amdgpu_ih_ring_fini(adev); 286 amdgpu_ih_ring_fini(adev);
287 amdgpu_irq_remove_domain(adev);
281 288
282 return 0; 289 return 0;
283} 290}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4dcc8fba5792..093599aba64b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3729,7 +3729,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3729 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3729 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3730 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3730 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3731 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3731 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3732 DRM_MODE_ENCODER_DAC); 3732 DRM_MODE_ENCODER_DAC, NULL);
3733 drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs); 3733 drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3734 break; 3734 break;
3735 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3735 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3740,15 +3740,15 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3740 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3740 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3741 amdgpu_encoder->rmx_type = RMX_FULL; 3741 amdgpu_encoder->rmx_type = RMX_FULL;
3742 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3742 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3743 DRM_MODE_ENCODER_LVDS); 3743 DRM_MODE_ENCODER_LVDS, NULL);
3744 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3744 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3745 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3745 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3746 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3746 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3747 DRM_MODE_ENCODER_DAC); 3747 DRM_MODE_ENCODER_DAC, NULL);
3748 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3748 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3749 } else { 3749 } else {
3750 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3750 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3751 DRM_MODE_ENCODER_TMDS); 3751 DRM_MODE_ENCODER_TMDS, NULL);
3752 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3752 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3753 } 3753 }
3754 drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs); 3754 drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
@@ -3766,13 +3766,13 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3766 amdgpu_encoder->is_ext_encoder = true; 3766 amdgpu_encoder->is_ext_encoder = true;
3767 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3767 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3768 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3768 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3769 DRM_MODE_ENCODER_LVDS); 3769 DRM_MODE_ENCODER_LVDS, NULL);
3770 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3770 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3771 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3771 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3772 DRM_MODE_ENCODER_DAC); 3772 DRM_MODE_ENCODER_DAC, NULL);
3773 else 3773 else
3774 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3774 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3775 DRM_MODE_ENCODER_TMDS); 3775 DRM_MODE_ENCODER_TMDS, NULL);
3776 drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs); 3776 drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3777 break; 3777 break;
3778 } 3778 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 8f1e51128b33..8e67249d4367 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -211,9 +211,9 @@ static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
211 */ 211 */
212static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc) 212static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
213{ 213{
214 unsigned i = 0; 214 unsigned i = 100;
215 215
216 if (crtc >= adev->mode_info.num_crtc) 216 if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
217 return; 217 return;
218 218
219 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) 219 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
@@ -223,14 +223,16 @@ static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
223 * wait for another frame. 223 * wait for another frame.
224 */ 224 */
225 while (dce_v11_0_is_in_vblank(adev, crtc)) { 225 while (dce_v11_0_is_in_vblank(adev, crtc)) {
226 if (i++ % 100 == 0) { 226 if (i++ == 100) {
227 i = 0;
227 if (!dce_v11_0_is_counter_moving(adev, crtc)) 228 if (!dce_v11_0_is_counter_moving(adev, crtc))
228 break; 229 break;
229 } 230 }
230 } 231 }
231 232
232 while (!dce_v11_0_is_in_vblank(adev, crtc)) { 233 while (!dce_v11_0_is_in_vblank(adev, crtc)) {
233 if (i++ % 100 == 0) { 234 if (i++ == 100) {
235 i = 0;
234 if (!dce_v11_0_is_counter_moving(adev, crtc)) 236 if (!dce_v11_0_is_counter_moving(adev, crtc))
235 break; 237 break;
236 } 238 }
@@ -239,7 +241,7 @@ static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
239 241
240static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 242static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
241{ 243{
242 if (crtc >= adev->mode_info.num_crtc) 244 if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
243 return 0; 245 return 0;
244 else 246 else
245 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 247 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
@@ -3384,7 +3386,7 @@ static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3384{ 3386{
3385 u32 tmp; 3387 u32 tmp;
3386 3388
3387 if (crtc >= adev->mode_info.num_crtc) { 3389 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3388 DRM_DEBUG("invalid crtc %d\n", crtc); 3390 DRM_DEBUG("invalid crtc %d\n", crtc);
3389 return; 3391 return;
3390 } 3392 }
@@ -3399,7 +3401,7 @@ static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3399{ 3401{
3400 u32 tmp; 3402 u32 tmp;
3401 3403
3402 if (crtc >= adev->mode_info.num_crtc) { 3404 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3403 DRM_DEBUG("invalid crtc %d\n", crtc); 3405 DRM_DEBUG("invalid crtc %d\n", crtc);
3404 return; 3406 return;
3405 } 3407 }
@@ -3722,7 +3724,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3722 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3724 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3723 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3725 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3724 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3726 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3725 DRM_MODE_ENCODER_DAC); 3727 DRM_MODE_ENCODER_DAC, NULL);
3726 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); 3728 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
3727 break; 3729 break;
3728 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3730 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3733,15 +3735,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3733 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3735 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3734 amdgpu_encoder->rmx_type = RMX_FULL; 3736 amdgpu_encoder->rmx_type = RMX_FULL;
3735 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3737 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3736 DRM_MODE_ENCODER_LVDS); 3738 DRM_MODE_ENCODER_LVDS, NULL);
3737 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3739 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3738 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3740 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3739 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3741 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3740 DRM_MODE_ENCODER_DAC); 3742 DRM_MODE_ENCODER_DAC, NULL);
3741 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3743 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3742 } else { 3744 } else {
3743 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3745 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3744 DRM_MODE_ENCODER_TMDS); 3746 DRM_MODE_ENCODER_TMDS, NULL);
3745 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3747 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3746 } 3748 }
3747 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); 3749 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
@@ -3759,13 +3761,13 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3759 amdgpu_encoder->is_ext_encoder = true; 3761 amdgpu_encoder->is_ext_encoder = true;
3760 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3762 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3761 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3763 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3762 DRM_MODE_ENCODER_LVDS); 3764 DRM_MODE_ENCODER_LVDS, NULL);
3763 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3765 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3764 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3766 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3765 DRM_MODE_ENCODER_DAC); 3767 DRM_MODE_ENCODER_DAC, NULL);
3766 else 3768 else
3767 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3769 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3768 DRM_MODE_ENCODER_TMDS); 3770 DRM_MODE_ENCODER_TMDS, NULL);
3769 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); 3771 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
3770 break; 3772 break;
3771 } 3773 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 42d954dc436d..d0e128c24813 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3659,7 +3659,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3659 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3659 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3660 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3660 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3661 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3661 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3662 DRM_MODE_ENCODER_DAC); 3662 DRM_MODE_ENCODER_DAC, NULL);
3663 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs); 3663 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3664 break; 3664 break;
3665 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3665 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3670,15 +3670,15 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3670 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3670 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3671 amdgpu_encoder->rmx_type = RMX_FULL; 3671 amdgpu_encoder->rmx_type = RMX_FULL;
3672 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3672 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3673 DRM_MODE_ENCODER_LVDS); 3673 DRM_MODE_ENCODER_LVDS, NULL);
3674 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3674 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3675 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3675 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3676 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3676 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3677 DRM_MODE_ENCODER_DAC); 3677 DRM_MODE_ENCODER_DAC, NULL);
3678 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3678 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3679 } else { 3679 } else {
3680 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3680 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3681 DRM_MODE_ENCODER_TMDS); 3681 DRM_MODE_ENCODER_TMDS, NULL);
3682 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3682 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3683 } 3683 }
3684 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs); 3684 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
@@ -3696,13 +3696,13 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3696 amdgpu_encoder->is_ext_encoder = true; 3696 amdgpu_encoder->is_ext_encoder = true;
3697 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3697 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3698 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3698 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3699 DRM_MODE_ENCODER_LVDS); 3699 DRM_MODE_ENCODER_LVDS, NULL);
3700 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3700 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3701 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3701 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3702 DRM_MODE_ENCODER_DAC); 3702 DRM_MODE_ENCODER_DAC, NULL);
3703 else 3703 else
3704 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3704 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3705 DRM_MODE_ENCODER_TMDS); 3705 DRM_MODE_ENCODER_TMDS, NULL);
3706 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs); 3706 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3707 break; 3707 break;
3708 } 3708 }
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
index 8f9845d9a986..4b0e45a27129 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -24,7 +24,7 @@
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include "drmP.h" 25#include "drmP.h"
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "fiji_smumgr.h" 27#include "fiji_smum.h"
28 28
29MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); 29MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
30 30
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
deleted file mode 100644
index 3c4824082990..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
+++ /dev/null
@@ -1,182 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef FIJI_PP_SMC_H
25#define FIJI_PP_SMC_H
26
27#pragma pack(push, 1)
28
29#define PPSMC_SWSTATE_FLAG_DC 0x01
30#define PPSMC_SWSTATE_FLAG_UVD 0x02
31#define PPSMC_SWSTATE_FLAG_VCE 0x04
32
33#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
34#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
35#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
36
37#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
38#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
39#define PPSMC_SYSTEMFLAG_GDDR5 0x04
40
41#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
42
43#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
44#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
45
46#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
47#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
48
49#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
50#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
51
52#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
53#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
54#define PPSMC_DPM2FLAGS_OCP 0x04
55
56#define PPSMC_DISPLAY_WATERMARK_LOW 0
57#define PPSMC_DISPLAY_WATERMARK_HIGH 1
58
59#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
60#define PPSMC_STATEFLAG_POWERBOOST 0x02
61#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
62#define PPSMC_STATEFLAG_POWERSHIFT 0x08
63#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
64#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
65#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
66
67#define FDO_MODE_HARDWARE 0
68#define FDO_MODE_PIECE_WISE_LINEAR 1
69
70enum FAN_CONTROL {
71 FAN_CONTROL_FUZZY,
72 FAN_CONTROL_TABLE
73};
74
75//Gemini Modes
76#define PPSMC_GeminiModeNone 0 //Single GPU board
77#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board
78#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board
79
80#define PPSMC_Result_OK ((uint16_t)0x01)
81#define PPSMC_Result_NoMore ((uint16_t)0x02)
82#define PPSMC_Result_NotNow ((uint16_t)0x03)
83#define PPSMC_Result_Failed ((uint16_t)0xFF)
84#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
85#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
86
87typedef uint16_t PPSMC_Result;
88
89#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
90
91#define PPSMC_MSG_Halt ((uint16_t)0x10)
92#define PPSMC_MSG_Resume ((uint16_t)0x11)
93#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
94#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
95#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
96#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
97#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
98#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
99#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
100#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
101#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
102#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
103#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
104#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
105#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
106#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
107#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
108#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
109#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
110#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
111#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
112#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
113#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
114#define PPSMC_CACHistoryStart ((uint16_t)0x57)
115#define PPSMC_CACHistoryStop ((uint16_t)0x58)
116#define PPSMC_TDPClampingActive ((uint16_t)0x59)
117#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
118#define PPSMC_StartFanControl ((uint16_t)0x5B)
119#define PPSMC_StopFanControl ((uint16_t)0x5C)
120#define PPSMC_NoDisplay ((uint16_t)0x5D)
121#define PPSMC_HasDisplay ((uint16_t)0x5E)
122#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
123#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
124#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
125#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
126#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
127#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
128#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
129#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
130#define PPSMC_OCPActive ((uint16_t)0x6C)
131#define PPSMC_OCPInactive ((uint16_t)0x6D)
132#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
133#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
134#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
135#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
136#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
137#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
138#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
139#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
140#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
141#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
142#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
143#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
144#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
145#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
146#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
147#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
148#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
149#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
150#define PPSMC_FlushDataCache ((uint16_t)0x80)
151#define PPSMC_FlushInstrCache ((uint16_t)0x81)
152#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
153#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
154#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
155#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
156#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
157#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
158#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
159#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
160#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
161#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
162#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
163
164#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
165
166#define PPSMC_MSG_Test ((uint16_t)0x100)
167#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250)
168#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251)
169#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252)
170#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253)
171#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254)
172
173typedef uint16_t PPSMC_Msg;
174
175#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
176#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
177#define PPSMC_EVENT_STATUS_DC 0x00000004
178#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
179
180#pragma pack(pop)
181
182#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index bda1249eb871..e35340afd3db 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -25,7 +25,7 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "fiji_ppsmc.h" 27#include "fiji_ppsmc.h"
28#include "fiji_smumgr.h" 28#include "fiji_smum.h"
29#include "smu_ucode_xfer_vi.h" 29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h" 30#include "amdgpu_ucode.h"
31 31
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h b/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
index 1cef03deeac3..1cef03deeac3 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e1dcab98e249..13235d84e5a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -66,6 +66,27 @@
66#define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT) 66#define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
67#define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT) 67#define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
68 68
69#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK 0x00000001L
70#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK 0x00000002L
71#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK 0x00000004L
72#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK 0x00000008L
73#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK 0x00000010L
74#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK 0x00000020L
75
76/* BPM SERDES CMD */
77#define SET_BPM_SERDES_CMD 1
78#define CLE_BPM_SERDES_CMD 0
79
80/* BPM Register Address*/
81enum {
82 BPM_REG_CGLS_EN = 0, /* Enable/Disable CGLS */
83 BPM_REG_CGLS_ON, /* ON/OFF CGLS: shall be controlled by RLC FW */
84 BPM_REG_CGCG_OVERRIDE, /* Set/Clear CGCG Override */
85 BPM_REG_MGCG_OVERRIDE, /* Set/Clear MGCG Override */
86 BPM_REG_FGCG_OVERRIDE, /* Set/Clear FGCG Override */
87 BPM_REG_FGCG_MAX
88};
89
69MODULE_FIRMWARE("amdgpu/carrizo_ce.bin"); 90MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
70MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin"); 91MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
71MODULE_FIRMWARE("amdgpu/carrizo_me.bin"); 92MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
@@ -964,6 +985,322 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
964 return 0; 985 return 0;
965} 986}
966 987
988static const u32 vgpr_init_compute_shader[] =
989{
990 0x7e000209, 0x7e020208,
991 0x7e040207, 0x7e060206,
992 0x7e080205, 0x7e0a0204,
993 0x7e0c0203, 0x7e0e0202,
994 0x7e100201, 0x7e120200,
995 0x7e140209, 0x7e160208,
996 0x7e180207, 0x7e1a0206,
997 0x7e1c0205, 0x7e1e0204,
998 0x7e200203, 0x7e220202,
999 0x7e240201, 0x7e260200,
1000 0x7e280209, 0x7e2a0208,
1001 0x7e2c0207, 0x7e2e0206,
1002 0x7e300205, 0x7e320204,
1003 0x7e340203, 0x7e360202,
1004 0x7e380201, 0x7e3a0200,
1005 0x7e3c0209, 0x7e3e0208,
1006 0x7e400207, 0x7e420206,
1007 0x7e440205, 0x7e460204,
1008 0x7e480203, 0x7e4a0202,
1009 0x7e4c0201, 0x7e4e0200,
1010 0x7e500209, 0x7e520208,
1011 0x7e540207, 0x7e560206,
1012 0x7e580205, 0x7e5a0204,
1013 0x7e5c0203, 0x7e5e0202,
1014 0x7e600201, 0x7e620200,
1015 0x7e640209, 0x7e660208,
1016 0x7e680207, 0x7e6a0206,
1017 0x7e6c0205, 0x7e6e0204,
1018 0x7e700203, 0x7e720202,
1019 0x7e740201, 0x7e760200,
1020 0x7e780209, 0x7e7a0208,
1021 0x7e7c0207, 0x7e7e0206,
1022 0xbf8a0000, 0xbf810000,
1023};
1024
1025static const u32 sgpr_init_compute_shader[] =
1026{
1027 0xbe8a0100, 0xbe8c0102,
1028 0xbe8e0104, 0xbe900106,
1029 0xbe920108, 0xbe940100,
1030 0xbe960102, 0xbe980104,
1031 0xbe9a0106, 0xbe9c0108,
1032 0xbe9e0100, 0xbea00102,
1033 0xbea20104, 0xbea40106,
1034 0xbea60108, 0xbea80100,
1035 0xbeaa0102, 0xbeac0104,
1036 0xbeae0106, 0xbeb00108,
1037 0xbeb20100, 0xbeb40102,
1038 0xbeb60104, 0xbeb80106,
1039 0xbeba0108, 0xbebc0100,
1040 0xbebe0102, 0xbec00104,
1041 0xbec20106, 0xbec40108,
1042 0xbec60100, 0xbec80102,
1043 0xbee60004, 0xbee70005,
1044 0xbeea0006, 0xbeeb0007,
1045 0xbee80008, 0xbee90009,
1046 0xbefc0000, 0xbf8a0000,
1047 0xbf810000, 0x00000000,
1048};
1049
1050static const u32 vgpr_init_regs[] =
1051{
1052 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1053 mmCOMPUTE_RESOURCE_LIMITS, 0,
1054 mmCOMPUTE_NUM_THREAD_X, 256*4,
1055 mmCOMPUTE_NUM_THREAD_Y, 1,
1056 mmCOMPUTE_NUM_THREAD_Z, 1,
1057 mmCOMPUTE_PGM_RSRC2, 20,
1058 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1059 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1060 mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1061 mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1062 mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1063 mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1064 mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1065 mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1066 mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1067 mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1068};
1069
1070static const u32 sgpr1_init_regs[] =
1071{
1072 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1073 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1074 mmCOMPUTE_NUM_THREAD_X, 256*5,
1075 mmCOMPUTE_NUM_THREAD_Y, 1,
1076 mmCOMPUTE_NUM_THREAD_Z, 1,
1077 mmCOMPUTE_PGM_RSRC2, 20,
1078 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1079 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1080 mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1081 mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1082 mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1083 mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1084 mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1085 mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1086 mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1087 mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1088};
1089
1090static const u32 sgpr2_init_regs[] =
1091{
1092 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
1093 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1094 mmCOMPUTE_NUM_THREAD_X, 256*5,
1095 mmCOMPUTE_NUM_THREAD_Y, 1,
1096 mmCOMPUTE_NUM_THREAD_Z, 1,
1097 mmCOMPUTE_PGM_RSRC2, 20,
1098 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1099 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1100 mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1101 mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1102 mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1103 mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1104 mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1105 mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1106 mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1107 mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1108};
1109
1110static const u32 sec_ded_counter_registers[] =
1111{
1112 mmCPC_EDC_ATC_CNT,
1113 mmCPC_EDC_SCRATCH_CNT,
1114 mmCPC_EDC_UCODE_CNT,
1115 mmCPF_EDC_ATC_CNT,
1116 mmCPF_EDC_ROQ_CNT,
1117 mmCPF_EDC_TAG_CNT,
1118 mmCPG_EDC_ATC_CNT,
1119 mmCPG_EDC_DMA_CNT,
1120 mmCPG_EDC_TAG_CNT,
1121 mmDC_EDC_CSINVOC_CNT,
1122 mmDC_EDC_RESTORE_CNT,
1123 mmDC_EDC_STATE_CNT,
1124 mmGDS_EDC_CNT,
1125 mmGDS_EDC_GRBM_CNT,
1126 mmGDS_EDC_OA_DED,
1127 mmSPI_EDC_CNT,
1128 mmSQC_ATC_EDC_GATCL1_CNT,
1129 mmSQC_EDC_CNT,
1130 mmSQ_EDC_DED_CNT,
1131 mmSQ_EDC_INFO,
1132 mmSQ_EDC_SEC_CNT,
1133 mmTCC_EDC_CNT,
1134 mmTCP_ATC_EDC_GATCL1_CNT,
1135 mmTCP_EDC_CNT,
1136 mmTD_EDC_CNT
1137};
1138
1139static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1140{
1141 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1142 struct amdgpu_ib ib;
1143 struct fence *f = NULL;
1144 int r, i;
1145 u32 tmp;
1146 unsigned total_size, vgpr_offset, sgpr_offset;
1147 u64 gpu_addr;
1148
1149 /* only supported on CZ */
1150 if (adev->asic_type != CHIP_CARRIZO)
1151 return 0;
1152
1153 /* bail if the compute ring is not ready */
1154 if (!ring->ready)
1155 return 0;
1156
1157 tmp = RREG32(mmGB_EDC_MODE);
1158 WREG32(mmGB_EDC_MODE, 0);
1159
1160 total_size =
1161 (((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1162 total_size +=
1163 (((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1164 total_size +=
1165 (((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1166 total_size = ALIGN(total_size, 256);
1167 vgpr_offset = total_size;
1168 total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
1169 sgpr_offset = total_size;
1170 total_size += sizeof(sgpr_init_compute_shader);
1171
1172 /* allocate an indirect buffer to put the commands in */
1173 memset(&ib, 0, sizeof(ib));
1174 r = amdgpu_ib_get(ring, NULL, total_size, &ib);
1175 if (r) {
1176 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
1177 return r;
1178 }
1179
1180 /* load the compute shaders */
1181 for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
1182 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
1183
1184 for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
1185 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
1186
1187 /* init the ib length to 0 */
1188 ib.length_dw = 0;
1189
1190 /* VGPR */
1191 /* write the register state for the compute dispatch */
1192 for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
1193 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1194 ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
1195 ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
1196 }
1197 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1198 gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
1199 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1200 ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1201 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1202 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1203
1204 /* write dispatch packet */
1205 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1206 ib.ptr[ib.length_dw++] = 8; /* x */
1207 ib.ptr[ib.length_dw++] = 1; /* y */
1208 ib.ptr[ib.length_dw++] = 1; /* z */
1209 ib.ptr[ib.length_dw++] =
1210 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1211
1212 /* write CS partial flush packet */
1213 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1214 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1215
1216 /* SGPR1 */
1217 /* write the register state for the compute dispatch */
1218 for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
1219 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1220 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
1221 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
1222 }
1223 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1224 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1225 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1226 ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1227 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1228 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1229
1230 /* write dispatch packet */
1231 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1232 ib.ptr[ib.length_dw++] = 8; /* x */
1233 ib.ptr[ib.length_dw++] = 1; /* y */
1234 ib.ptr[ib.length_dw++] = 1; /* z */
1235 ib.ptr[ib.length_dw++] =
1236 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1237
1238 /* write CS partial flush packet */
1239 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1240 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1241
1242 /* SGPR2 */
1243 /* write the register state for the compute dispatch */
1244 for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
1245 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1246 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
1247 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
1248 }
1249 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1250 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1251 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1252 ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1253 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1254 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1255
1256 /* write dispatch packet */
1257 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1258 ib.ptr[ib.length_dw++] = 8; /* x */
1259 ib.ptr[ib.length_dw++] = 1; /* y */
1260 ib.ptr[ib.length_dw++] = 1; /* z */
1261 ib.ptr[ib.length_dw++] =
1262 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1263
1264 /* write CS partial flush packet */
1265 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1266 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1267
1268 /* shedule the ib on the ring */
1269 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
1270 AMDGPU_FENCE_OWNER_UNDEFINED,
1271 &f);
1272 if (r) {
1273 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
1274 goto fail;
1275 }
1276
1277 /* wait for the GPU to finish processing the IB */
1278 r = fence_wait(f, false);
1279 if (r) {
1280 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1281 goto fail;
1282 }
1283
1284 tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
1285 tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
1286 WREG32(mmGB_EDC_MODE, tmp);
1287
1288 tmp = RREG32(mmCC_GC_EDC_CONFIG);
1289 tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
1290 WREG32(mmCC_GC_EDC_CONFIG, tmp);
1291
1292
1293 /* read back registers to clear the counters */
1294 for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
1295 RREG32(sec_ded_counter_registers[i]);
1296
1297fail:
1298 fence_put(f);
1299 amdgpu_ib_free(adev, &ib);
1300
1301 return r;
1302}
1303
967static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) 1304static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
968{ 1305{
969 u32 gb_addr_config; 1306 u32 gb_addr_config;
@@ -1323,1418 +1660,923 @@ static int gfx_v8_0_sw_fini(void *handle)
1323 1660
1324static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) 1661static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1325{ 1662{
1326 const u32 num_tile_mode_states = 32; 1663 uint32_t *modearray, *mod2array;
1327 const u32 num_secondary_tile_mode_states = 16; 1664 const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
1328 u32 reg_offset, gb_tile_moden, split_equal_to_row_size; 1665 const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
1666 u32 reg_offset;
1329 1667
1330 switch (adev->gfx.config.mem_row_size_in_kb) { 1668 modearray = adev->gfx.config.tile_mode_array;
1331 case 1: 1669 mod2array = adev->gfx.config.macrotile_mode_array;
1332 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; 1670
1333 break; 1671 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1334 case 2: 1672 modearray[reg_offset] = 0;
1335 default: 1673
1336 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; 1674 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1337 break; 1675 mod2array[reg_offset] = 0;
1338 case 4:
1339 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
1340 break;
1341 }
1342 1676
1343 switch (adev->asic_type) { 1677 switch (adev->asic_type) {
1344 case CHIP_TOPAZ: 1678 case CHIP_TOPAZ:
1345 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1679 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1346 switch (reg_offset) { 1680 PIPE_CONFIG(ADDR_SURF_P2) |
1347 case 0: 1681 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1348 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1682 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1349 PIPE_CONFIG(ADDR_SURF_P2) | 1683 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1350 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1684 PIPE_CONFIG(ADDR_SURF_P2) |
1351 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1685 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1352 break; 1686 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1353 case 1: 1687 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1354 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1688 PIPE_CONFIG(ADDR_SURF_P2) |
1355 PIPE_CONFIG(ADDR_SURF_P2) | 1689 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1356 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1690 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1357 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1691 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1358 break; 1692 PIPE_CONFIG(ADDR_SURF_P2) |
1359 case 2: 1693 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1360 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1694 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1361 PIPE_CONFIG(ADDR_SURF_P2) | 1695 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1362 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1696 PIPE_CONFIG(ADDR_SURF_P2) |
1363 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1697 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1364 break; 1698 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1365 case 3: 1699 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1366 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1700 PIPE_CONFIG(ADDR_SURF_P2) |
1367 PIPE_CONFIG(ADDR_SURF_P2) | 1701 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1368 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1702 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1369 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1703 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1370 break; 1704 PIPE_CONFIG(ADDR_SURF_P2) |
1371 case 4: 1705 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1372 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1706 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1373 PIPE_CONFIG(ADDR_SURF_P2) | 1707 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1374 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1708 PIPE_CONFIG(ADDR_SURF_P2));
1375 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1709 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1376 break; 1710 PIPE_CONFIG(ADDR_SURF_P2) |
1377 case 5: 1711 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1378 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1712 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1379 PIPE_CONFIG(ADDR_SURF_P2) | 1713 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1380 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1714 PIPE_CONFIG(ADDR_SURF_P2) |
1381 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1715 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1382 break; 1716 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1383 case 6: 1717 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1384 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1718 PIPE_CONFIG(ADDR_SURF_P2) |
1385 PIPE_CONFIG(ADDR_SURF_P2) | 1719 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1386 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1720 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1387 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1721 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1388 break; 1722 PIPE_CONFIG(ADDR_SURF_P2) |
1389 case 8: 1723 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1390 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1724 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1391 PIPE_CONFIG(ADDR_SURF_P2)); 1725 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1392 break; 1726 PIPE_CONFIG(ADDR_SURF_P2) |
1393 case 9: 1727 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1394 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1728 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1395 PIPE_CONFIG(ADDR_SURF_P2) | 1729 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1396 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1730 PIPE_CONFIG(ADDR_SURF_P2) |
1397 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1731 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1398 break; 1732 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1399 case 10: 1733 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1400 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1734 PIPE_CONFIG(ADDR_SURF_P2) |
1401 PIPE_CONFIG(ADDR_SURF_P2) | 1735 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1402 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1736 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1403 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1737 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1404 break; 1738 PIPE_CONFIG(ADDR_SURF_P2) |
1405 case 11: 1739 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1406 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1740 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1407 PIPE_CONFIG(ADDR_SURF_P2) | 1741 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1408 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1742 PIPE_CONFIG(ADDR_SURF_P2) |
1409 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1743 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1410 break; 1744 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1411 case 13: 1745 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1412 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1746 PIPE_CONFIG(ADDR_SURF_P2) |
1413 PIPE_CONFIG(ADDR_SURF_P2) | 1747 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1414 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1748 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1415 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1749 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1416 break; 1750 PIPE_CONFIG(ADDR_SURF_P2) |
1417 case 14: 1751 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1418 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1752 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1419 PIPE_CONFIG(ADDR_SURF_P2) | 1753 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1420 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1754 PIPE_CONFIG(ADDR_SURF_P2) |
1421 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1755 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1422 break; 1756 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1423 case 15: 1757 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1424 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1758 PIPE_CONFIG(ADDR_SURF_P2) |
1425 PIPE_CONFIG(ADDR_SURF_P2) | 1759 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1426 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1760 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1427 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1761 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1428 break; 1762 PIPE_CONFIG(ADDR_SURF_P2) |
1429 case 16: 1763 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1430 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1764 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1431 PIPE_CONFIG(ADDR_SURF_P2) | 1765 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1432 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1766 PIPE_CONFIG(ADDR_SURF_P2) |
1433 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1767 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1434 break; 1768 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1435 case 18: 1769 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1436 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1770 PIPE_CONFIG(ADDR_SURF_P2) |
1437 PIPE_CONFIG(ADDR_SURF_P2) | 1771 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1438 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1772 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1439 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1773 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1440 break; 1774 PIPE_CONFIG(ADDR_SURF_P2) |
1441 case 19: 1775 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1442 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1776 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1443 PIPE_CONFIG(ADDR_SURF_P2) | 1777 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1444 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1778 PIPE_CONFIG(ADDR_SURF_P2) |
1445 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1779 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1446 break; 1780 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1447 case 20: 1781
1448 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1782 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1449 PIPE_CONFIG(ADDR_SURF_P2) | 1783 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1450 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1784 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1451 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1785 NUM_BANKS(ADDR_SURF_8_BANK));
1452 break; 1786 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1453 case 21: 1787 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1454 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1788 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1455 PIPE_CONFIG(ADDR_SURF_P2) | 1789 NUM_BANKS(ADDR_SURF_8_BANK));
1456 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1790 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1457 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1791 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1458 break; 1792 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1459 case 22: 1793 NUM_BANKS(ADDR_SURF_8_BANK));
1460 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1794 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1461 PIPE_CONFIG(ADDR_SURF_P2) | 1795 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1462 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1796 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1463 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1797 NUM_BANKS(ADDR_SURF_8_BANK));
1464 break; 1798 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1465 case 24: 1799 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1466 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1800 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1467 PIPE_CONFIG(ADDR_SURF_P2) | 1801 NUM_BANKS(ADDR_SURF_8_BANK));
1468 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1802 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1469 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1803 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1470 break; 1804 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1471 case 25: 1805 NUM_BANKS(ADDR_SURF_8_BANK));
1472 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1806 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1473 PIPE_CONFIG(ADDR_SURF_P2) | 1807 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1474 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1808 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1475 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1809 NUM_BANKS(ADDR_SURF_8_BANK));
1476 break; 1810 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1477 case 26: 1811 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1478 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1812 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1479 PIPE_CONFIG(ADDR_SURF_P2) | 1813 NUM_BANKS(ADDR_SURF_16_BANK));
1480 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1814 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1481 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1815 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1482 break; 1816 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1483 case 27: 1817 NUM_BANKS(ADDR_SURF_16_BANK));
1484 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1818 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1485 PIPE_CONFIG(ADDR_SURF_P2) | 1819 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1486 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1820 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1487 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1821 NUM_BANKS(ADDR_SURF_16_BANK));
1488 break; 1822 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1489 case 28: 1823 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1490 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1824 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1491 PIPE_CONFIG(ADDR_SURF_P2) | 1825 NUM_BANKS(ADDR_SURF_16_BANK));
1492 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1826 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1493 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1827 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1494 break; 1828 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1495 case 29: 1829 NUM_BANKS(ADDR_SURF_16_BANK));
1496 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1830 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1497 PIPE_CONFIG(ADDR_SURF_P2) | 1831 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1498 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1832 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1499 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1833 NUM_BANKS(ADDR_SURF_16_BANK));
1500 break; 1834 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1501 case 7: 1835 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1502 case 12: 1836 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1503 case 17: 1837 NUM_BANKS(ADDR_SURF_8_BANK));
1504 case 23: 1838
1505 /* unused idx */ 1839 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1506 continue; 1840 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
1507 default: 1841 reg_offset != 23)
1508 gb_tile_moden = 0; 1842 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
1509 break; 1843
1510 }; 1844 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1511 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; 1845 if (reg_offset != 7)
1512 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); 1846 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
1513 } 1847
1514 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { 1848 break;
1515 switch (reg_offset) {
1516 case 0:
1517 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1518 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1519 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1520 NUM_BANKS(ADDR_SURF_8_BANK));
1521 break;
1522 case 1:
1523 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1524 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1525 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1526 NUM_BANKS(ADDR_SURF_8_BANK));
1527 break;
1528 case 2:
1529 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1530 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1531 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1532 NUM_BANKS(ADDR_SURF_8_BANK));
1533 break;
1534 case 3:
1535 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1536 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1537 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1538 NUM_BANKS(ADDR_SURF_8_BANK));
1539 break;
1540 case 4:
1541 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1542 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1543 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1544 NUM_BANKS(ADDR_SURF_8_BANK));
1545 break;
1546 case 5:
1547 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1548 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1549 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1550 NUM_BANKS(ADDR_SURF_8_BANK));
1551 break;
1552 case 6:
1553 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1554 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1555 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1556 NUM_BANKS(ADDR_SURF_8_BANK));
1557 break;
1558 case 8:
1559 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1560 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1561 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1562 NUM_BANKS(ADDR_SURF_16_BANK));
1563 break;
1564 case 9:
1565 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1566 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1567 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1568 NUM_BANKS(ADDR_SURF_16_BANK));
1569 break;
1570 case 10:
1571 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1572 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1573 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1574 NUM_BANKS(ADDR_SURF_16_BANK));
1575 break;
1576 case 11:
1577 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1578 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1579 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1580 NUM_BANKS(ADDR_SURF_16_BANK));
1581 break;
1582 case 12:
1583 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1584 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1585 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1586 NUM_BANKS(ADDR_SURF_16_BANK));
1587 break;
1588 case 13:
1589 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1590 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1591 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1592 NUM_BANKS(ADDR_SURF_16_BANK));
1593 break;
1594 case 14:
1595 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1596 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1597 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1598 NUM_BANKS(ADDR_SURF_8_BANK));
1599 break;
1600 case 7:
1601 /* unused idx */
1602 continue;
1603 default:
1604 gb_tile_moden = 0;
1605 break;
1606 };
1607 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
1608 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1609 }
1610 case CHIP_FIJI: 1849 case CHIP_FIJI:
1611 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1850 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1612 switch (reg_offset) { 1851 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1613 case 0: 1852 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1614 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1853 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1615 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1854 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1616 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1855 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1617 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1856 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1618 break; 1857 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1619 case 1: 1858 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1620 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1859 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1621 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1860 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1622 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1861 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1623 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1862 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1624 break; 1863 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1625 case 2: 1864 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1626 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1865 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1627 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1866 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1628 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1867 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1629 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1868 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1630 break; 1869 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1631 case 3: 1870 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1632 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1871 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1633 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1872 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1634 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1873 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1635 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1874 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1636 break; 1875 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1637 case 4: 1876 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1638 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1877 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1639 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1878 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1640 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1879 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1641 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1880 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1642 break; 1881 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1643 case 5: 1882 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1644 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1883 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1645 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1884 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1646 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1885 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1647 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1886 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1648 break; 1887 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1649 case 6: 1888 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1650 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1889 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1651 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1890 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1652 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1891 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1653 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1892 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1654 break; 1893 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1655 case 7: 1894 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1656 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1895 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1657 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1896 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1658 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1897 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1659 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1898 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1660 break; 1899 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1661 case 8: 1900 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1662 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1901 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1663 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); 1902 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1664 break; 1903 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1665 case 9: 1904 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1666 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1905 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1667 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1906 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1668 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1907 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1669 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1908 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1670 break; 1909 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1671 case 10: 1910 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1672 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1911 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1673 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1912 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1674 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1913 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1675 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1914 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1676 break; 1915 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1677 case 11: 1916 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1678 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1917 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1679 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1918 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1680 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1919 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1681 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1920 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1682 break; 1921 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1683 case 12: 1922 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1684 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1923 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1685 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1924 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1686 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1925 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1687 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1926 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1688 break; 1927 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1689 case 13: 1928 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1690 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1929 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1691 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1930 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1692 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1931 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1693 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1932 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1694 break; 1933 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1695 case 14: 1934 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1696 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1935 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1697 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1936 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1698 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1937 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1699 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1938 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1700 break; 1939 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1701 case 15: 1940 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1702 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1941 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1703 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1942 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1704 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1943 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1705 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1944 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1706 break; 1945 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1707 case 16: 1946 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1708 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1947 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1709 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1948 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1710 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1949 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1711 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1950 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1712 break; 1951 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1713 case 17: 1952 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1714 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1953 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1715 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1954 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1716 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1955 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1717 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1956 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1718 break; 1957 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1719 case 18: 1958 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1720 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1959 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1721 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1960 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1722 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1961 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1723 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1962 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1724 break; 1963 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1725 case 19: 1964 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1726 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1965 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1727 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1966 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1728 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1967 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1729 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1968 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1730 break; 1969 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1731 case 20: 1970 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1732 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1971 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1733 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1972
1734 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1973 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1735 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1974 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1736 break; 1975 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1737 case 21: 1976 NUM_BANKS(ADDR_SURF_8_BANK));
1738 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1977 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1739 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1978 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1740 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1979 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1741 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1980 NUM_BANKS(ADDR_SURF_8_BANK));
1742 break; 1981 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1743 case 22: 1982 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1744 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1983 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1745 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1984 NUM_BANKS(ADDR_SURF_8_BANK));
1746 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1985 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1747 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1986 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1748 break; 1987 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1749 case 23: 1988 NUM_BANKS(ADDR_SURF_8_BANK));
1750 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1989 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1751 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1990 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1752 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1991 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1753 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1992 NUM_BANKS(ADDR_SURF_8_BANK));
1754 break; 1993 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1755 case 24: 1994 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1756 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1995 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1757 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1996 NUM_BANKS(ADDR_SURF_8_BANK));
1758 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1997 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1759 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1998 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1760 break; 1999 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1761 case 25: 2000 NUM_BANKS(ADDR_SURF_8_BANK));
1762 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2001 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1763 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2002 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1764 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2003 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1765 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2004 NUM_BANKS(ADDR_SURF_8_BANK));
1766 break; 2005 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1767 case 26: 2006 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1768 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2007 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1769 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2008 NUM_BANKS(ADDR_SURF_8_BANK));
1770 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2009 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1771 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2010 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1772 break; 2011 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1773 case 27: 2012 NUM_BANKS(ADDR_SURF_8_BANK));
1774 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2013 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1775 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2014 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1776 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2015 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1777 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2016 NUM_BANKS(ADDR_SURF_8_BANK));
1778 break; 2017 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1779 case 28: 2018 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1780 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2019 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1781 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2020 NUM_BANKS(ADDR_SURF_8_BANK));
1782 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2021 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1783 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2022 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1784 break; 2023 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1785 case 29: 2024 NUM_BANKS(ADDR_SURF_8_BANK));
1786 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2025 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1787 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2026 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1788 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2027 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1789 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2028 NUM_BANKS(ADDR_SURF_4_BANK));
1790 break; 2029
1791 case 30: 2030 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1792 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2031 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
1793 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2032
1794 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2033 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1795 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2034 if (reg_offset != 7)
1796 break; 2035 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
1797 default: 2036
1798 gb_tile_moden = 0;
1799 break;
1800 }
1801 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
1802 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
1803 }
1804 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1805 switch (reg_offset) {
1806 case 0:
1807 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1808 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1809 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1810 NUM_BANKS(ADDR_SURF_8_BANK));
1811 break;
1812 case 1:
1813 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1816 NUM_BANKS(ADDR_SURF_8_BANK));
1817 break;
1818 case 2:
1819 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1820 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1821 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1822 NUM_BANKS(ADDR_SURF_8_BANK));
1823 break;
1824 case 3:
1825 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1826 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1827 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1828 NUM_BANKS(ADDR_SURF_8_BANK));
1829 break;
1830 case 4:
1831 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1832 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1833 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1834 NUM_BANKS(ADDR_SURF_8_BANK));
1835 break;
1836 case 5:
1837 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1838 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1839 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1840 NUM_BANKS(ADDR_SURF_8_BANK));
1841 break;
1842 case 6:
1843 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1844 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1845 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1846 NUM_BANKS(ADDR_SURF_8_BANK));
1847 break;
1848 case 8:
1849 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1850 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1851 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1852 NUM_BANKS(ADDR_SURF_8_BANK));
1853 break;
1854 case 9:
1855 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1856 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1857 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1858 NUM_BANKS(ADDR_SURF_8_BANK));
1859 break;
1860 case 10:
1861 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1862 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1863 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1864 NUM_BANKS(ADDR_SURF_8_BANK));
1865 break;
1866 case 11:
1867 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1868 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1869 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1870 NUM_BANKS(ADDR_SURF_8_BANK));
1871 break;
1872 case 12:
1873 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1874 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1875 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1876 NUM_BANKS(ADDR_SURF_8_BANK));
1877 break;
1878 case 13:
1879 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1880 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1881 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1882 NUM_BANKS(ADDR_SURF_8_BANK));
1883 break;
1884 case 14:
1885 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1886 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1887 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1888 NUM_BANKS(ADDR_SURF_4_BANK));
1889 break;
1890 case 7:
1891 /* unused idx */
1892 continue;
1893 default:
1894 gb_tile_moden = 0;
1895 break;
1896 }
1897 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
1898 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1899 }
1900 break; 2037 break;
1901 case CHIP_TONGA: 2038 case CHIP_TONGA:
1902 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2039 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1903 switch (reg_offset) { 2040 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1904 case 0: 2041 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1905 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2042 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1906 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2043 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1907 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2044 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1908 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2045 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1909 break; 2046 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1910 case 1: 2047 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1911 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2048 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1912 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2049 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1913 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 2050 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1914 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2051 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1915 break; 2052 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1916 case 2: 2053 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1917 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2054 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1918 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2055 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1919 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 2056 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1920 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2057 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1921 break; 2058 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1922 case 3: 2059 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1923 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2060 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1924 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2061 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1925 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 2062 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1926 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2063 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1927 break; 2064 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1928 case 4: 2065 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1929 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2066 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1930 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2067 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1931 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2068 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1932 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2069 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1933 break; 2070 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1934 case 5: 2071 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1935 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2072 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
1936 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2073 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1937 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2074 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1938 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2075 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1939 break; 2076 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1940 case 6: 2077 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1941 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2078 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1942 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2079 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1943 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2080 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1944 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2081 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1945 break; 2082 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1946 case 7: 2083 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1947 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2084 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1948 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2085 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1949 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2086 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1950 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2087 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1951 break; 2088 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1952 case 8: 2089 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1953 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 2090 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1954 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); 2091 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1955 break; 2092 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1956 case 9: 2093 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1957 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2094 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1958 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2095 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1959 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2096 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1960 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2097 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1961 break; 2098 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1962 case 10: 2099 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1963 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2100 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1964 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2101 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1965 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2102 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1966 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2103 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1967 break; 2104 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1968 case 11: 2105 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1969 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2106 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1970 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2107 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1971 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2108 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1972 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2109 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1973 break; 2110 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1974 case 12: 2111 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1975 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2112 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1976 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2113 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1977 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2114 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1978 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2115 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1979 break; 2116 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1980 case 13: 2117 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1981 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2118 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1982 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2119 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1983 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2120 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1984 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2121 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1985 break; 2122 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1986 case 14: 2123 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1987 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2124 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1988 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2125 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1989 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2126 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1990 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2127 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1991 break; 2128 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1992 case 15: 2129 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1993 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 2130 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1994 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2131 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1995 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2132 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1996 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2133 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1997 break; 2134 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1998 case 16: 2135 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1999 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2136 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2000 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2137 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2001 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2138 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2002 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2139 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2003 break; 2140 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2004 case 17: 2141 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2005 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2142 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2006 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2143 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2007 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2144 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2008 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2145 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2009 break; 2146 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2010 case 18: 2147 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2011 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2148 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2012 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2149 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2013 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2150 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2014 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2151 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2015 break; 2152 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2016 case 19: 2153 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2017 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2154 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2018 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2155 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2019 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2156 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2020 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2157 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2021 break; 2158 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2022 case 20: 2159 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2023 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2160 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2024 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2161
2025 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2162 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2026 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2163 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2027 break; 2164 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2028 case 21: 2165 NUM_BANKS(ADDR_SURF_16_BANK));
2029 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 2166 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2030 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2167 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2031 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2168 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2032 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2169 NUM_BANKS(ADDR_SURF_16_BANK));
2033 break; 2170 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2034 case 22: 2171 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2035 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2172 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2036 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2173 NUM_BANKS(ADDR_SURF_16_BANK));
2037 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2174 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2038 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2175 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2039 break; 2176 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2040 case 23: 2177 NUM_BANKS(ADDR_SURF_16_BANK));
2041 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2178 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2042 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2179 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2043 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2180 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2044 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2181 NUM_BANKS(ADDR_SURF_16_BANK));
2045 break; 2182 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2046 case 24: 2183 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2047 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2184 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2048 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2185 NUM_BANKS(ADDR_SURF_16_BANK));
2049 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2186 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2050 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2187 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2051 break; 2188 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2052 case 25: 2189 NUM_BANKS(ADDR_SURF_16_BANK));
2053 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2190 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2054 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2191 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2055 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2192 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2056 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2193 NUM_BANKS(ADDR_SURF_16_BANK));
2057 break; 2194 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2058 case 26: 2195 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2059 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2196 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2060 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2197 NUM_BANKS(ADDR_SURF_16_BANK));
2061 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2198 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2062 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2199 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2063 break; 2200 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2064 case 27: 2201 NUM_BANKS(ADDR_SURF_16_BANK));
2065 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2202 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2066 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2203 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2067 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2204 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2068 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2205 NUM_BANKS(ADDR_SURF_16_BANK));
2069 break; 2206 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2070 case 28: 2207 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2071 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2208 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2072 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2209 NUM_BANKS(ADDR_SURF_8_BANK));
2073 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2210 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2074 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2211 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2075 break; 2212 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2076 case 29: 2213 NUM_BANKS(ADDR_SURF_4_BANK));
2077 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2214 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2078 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2215 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2079 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2216 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2080 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2217 NUM_BANKS(ADDR_SURF_4_BANK));
2081 break; 2218
2082 case 30: 2219 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2083 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2220 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2084 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2221
2085 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2222 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2086 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2223 if (reg_offset != 7)
2087 break; 2224 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2088 default: 2225
2089 gb_tile_moden = 0;
2090 break;
2091 };
2092 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
2093 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
2094 }
2095 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2096 switch (reg_offset) {
2097 case 0:
2098 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2099 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2100 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2101 NUM_BANKS(ADDR_SURF_16_BANK));
2102 break;
2103 case 1:
2104 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2105 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2106 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2107 NUM_BANKS(ADDR_SURF_16_BANK));
2108 break;
2109 case 2:
2110 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2111 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2112 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2113 NUM_BANKS(ADDR_SURF_16_BANK));
2114 break;
2115 case 3:
2116 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2117 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2118 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2119 NUM_BANKS(ADDR_SURF_16_BANK));
2120 break;
2121 case 4:
2122 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2123 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2124 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2125 NUM_BANKS(ADDR_SURF_16_BANK));
2126 break;
2127 case 5:
2128 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2129 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2130 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2131 NUM_BANKS(ADDR_SURF_16_BANK));
2132 break;
2133 case 6:
2134 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2135 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2136 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2137 NUM_BANKS(ADDR_SURF_16_BANK));
2138 break;
2139 case 8:
2140 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2141 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2142 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2143 NUM_BANKS(ADDR_SURF_16_BANK));
2144 break;
2145 case 9:
2146 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2147 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2148 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2149 NUM_BANKS(ADDR_SURF_16_BANK));
2150 break;
2151 case 10:
2152 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2153 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2154 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2155 NUM_BANKS(ADDR_SURF_16_BANK));
2156 break;
2157 case 11:
2158 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2159 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2160 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2161 NUM_BANKS(ADDR_SURF_16_BANK));
2162 break;
2163 case 12:
2164 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2165 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2166 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2167 NUM_BANKS(ADDR_SURF_8_BANK));
2168 break;
2169 case 13:
2170 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2171 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2172 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2173 NUM_BANKS(ADDR_SURF_4_BANK));
2174 break;
2175 case 14:
2176 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2177 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2178 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2179 NUM_BANKS(ADDR_SURF_4_BANK));
2180 break;
2181 case 7:
2182 /* unused idx */
2183 continue;
2184 default:
2185 gb_tile_moden = 0;
2186 break;
2187 };
2188 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
2189 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
2190 }
2191 break; 2226 break;
2192 case CHIP_STONEY: 2227 case CHIP_STONEY:
2193 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2228 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2194 switch (reg_offset) { 2229 PIPE_CONFIG(ADDR_SURF_P2) |
2195 case 0: 2230 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2196 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2231 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2197 PIPE_CONFIG(ADDR_SURF_P2) | 2232 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2198 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2233 PIPE_CONFIG(ADDR_SURF_P2) |
2199 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2234 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2200 break; 2235 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2201 case 1: 2236 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2202 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2237 PIPE_CONFIG(ADDR_SURF_P2) |
2203 PIPE_CONFIG(ADDR_SURF_P2) | 2238 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2204 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 2239 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2205 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2240 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2206 break; 2241 PIPE_CONFIG(ADDR_SURF_P2) |
2207 case 2: 2242 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2208 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2243 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2209 PIPE_CONFIG(ADDR_SURF_P2) | 2244 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2210 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 2245 PIPE_CONFIG(ADDR_SURF_P2) |
2211 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2246 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2212 break; 2247 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2213 case 3: 2248 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2214 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2249 PIPE_CONFIG(ADDR_SURF_P2) |
2215 PIPE_CONFIG(ADDR_SURF_P2) | 2250 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2216 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 2251 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2217 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2252 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2218 break; 2253 PIPE_CONFIG(ADDR_SURF_P2) |
2219 case 4: 2254 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2220 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2255 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2221 PIPE_CONFIG(ADDR_SURF_P2) | 2256 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2222 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2257 PIPE_CONFIG(ADDR_SURF_P2));
2223 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2258 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2224 break; 2259 PIPE_CONFIG(ADDR_SURF_P2) |
2225 case 5: 2260 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2226 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2261 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2227 PIPE_CONFIG(ADDR_SURF_P2) | 2262 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2228 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2263 PIPE_CONFIG(ADDR_SURF_P2) |
2229 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2264 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2230 break; 2265 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2231 case 6: 2266 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2232 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2267 PIPE_CONFIG(ADDR_SURF_P2) |
2233 PIPE_CONFIG(ADDR_SURF_P2) | 2268 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2234 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2269 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2235 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2270 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2236 break; 2271 PIPE_CONFIG(ADDR_SURF_P2) |
2237 case 8: 2272 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2238 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 2273 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2239 PIPE_CONFIG(ADDR_SURF_P2)); 2274 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2240 break; 2275 PIPE_CONFIG(ADDR_SURF_P2) |
2241 case 9: 2276 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2242 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2277 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2243 PIPE_CONFIG(ADDR_SURF_P2) | 2278 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2244 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2279 PIPE_CONFIG(ADDR_SURF_P2) |
2245 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2280 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2246 break; 2281 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2247 case 10: 2282 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2248 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2283 PIPE_CONFIG(ADDR_SURF_P2) |
2249 PIPE_CONFIG(ADDR_SURF_P2) | 2284 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2250 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2285 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2251 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2286 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2252 break; 2287 PIPE_CONFIG(ADDR_SURF_P2) |
2253 case 11: 2288 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2254 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2289 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2255 PIPE_CONFIG(ADDR_SURF_P2) | 2290 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2256 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2291 PIPE_CONFIG(ADDR_SURF_P2) |
2257 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2292 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2258 break; 2293 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2259 case 13: 2294 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2260 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2295 PIPE_CONFIG(ADDR_SURF_P2) |
2261 PIPE_CONFIG(ADDR_SURF_P2) | 2296 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2262 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2297 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2263 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2298 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2264 break; 2299 PIPE_CONFIG(ADDR_SURF_P2) |
2265 case 14: 2300 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2266 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2301 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2267 PIPE_CONFIG(ADDR_SURF_P2) | 2302 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2268 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2303 PIPE_CONFIG(ADDR_SURF_P2) |
2269 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2304 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2270 break; 2305 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2271 case 15: 2306 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2272 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 2307 PIPE_CONFIG(ADDR_SURF_P2) |
2273 PIPE_CONFIG(ADDR_SURF_P2) | 2308 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2274 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2309 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2275 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2310 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2276 break; 2311 PIPE_CONFIG(ADDR_SURF_P2) |
2277 case 16: 2312 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2278 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2313 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2279 PIPE_CONFIG(ADDR_SURF_P2) | 2314 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2280 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2315 PIPE_CONFIG(ADDR_SURF_P2) |
2281 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2316 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2282 break; 2317 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2283 case 18: 2318 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2284 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2319 PIPE_CONFIG(ADDR_SURF_P2) |
2285 PIPE_CONFIG(ADDR_SURF_P2) | 2320 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2286 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2321 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2287 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2322 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2288 break; 2323 PIPE_CONFIG(ADDR_SURF_P2) |
2289 case 19: 2324 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2290 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2325 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2291 PIPE_CONFIG(ADDR_SURF_P2) | 2326 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2292 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2327 PIPE_CONFIG(ADDR_SURF_P2) |
2293 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2328 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2294 break; 2329 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2295 case 20: 2330
2296 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2331 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2297 PIPE_CONFIG(ADDR_SURF_P2) | 2332 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2298 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2333 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2299 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2334 NUM_BANKS(ADDR_SURF_8_BANK));
2300 break; 2335 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2301 case 21: 2336 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2302 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 2337 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2303 PIPE_CONFIG(ADDR_SURF_P2) | 2338 NUM_BANKS(ADDR_SURF_8_BANK));
2304 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2339 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2305 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2340 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2306 break; 2341 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2307 case 22: 2342 NUM_BANKS(ADDR_SURF_8_BANK));
2308 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2343 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2309 PIPE_CONFIG(ADDR_SURF_P2) | 2344 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2310 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2345 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2311 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2346 NUM_BANKS(ADDR_SURF_8_BANK));
2312 break; 2347 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2313 case 24: 2348 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2314 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2349 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2315 PIPE_CONFIG(ADDR_SURF_P2) | 2350 NUM_BANKS(ADDR_SURF_8_BANK));
2316 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2351 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2317 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2352 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2318 break; 2353 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2319 case 25: 2354 NUM_BANKS(ADDR_SURF_8_BANK));
2320 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2355 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2321 PIPE_CONFIG(ADDR_SURF_P2) | 2356 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2322 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2357 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2323 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2358 NUM_BANKS(ADDR_SURF_8_BANK));
2324 break; 2359 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2325 case 26: 2360 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2326 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2361 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2327 PIPE_CONFIG(ADDR_SURF_P2) | 2362 NUM_BANKS(ADDR_SURF_16_BANK));
2328 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2363 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2329 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2364 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2330 break; 2365 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2331 case 27: 2366 NUM_BANKS(ADDR_SURF_16_BANK));
2332 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2367 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2333 PIPE_CONFIG(ADDR_SURF_P2) | 2368 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2334 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2369 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2335 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2370 NUM_BANKS(ADDR_SURF_16_BANK));
2336 break; 2371 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2337 case 28: 2372 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2338 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2373 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2339 PIPE_CONFIG(ADDR_SURF_P2) | 2374 NUM_BANKS(ADDR_SURF_16_BANK));
2340 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2375 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2341 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2376 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2342 break; 2377 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2343 case 29: 2378 NUM_BANKS(ADDR_SURF_16_BANK));
2344 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2379 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2345 PIPE_CONFIG(ADDR_SURF_P2) | 2380 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2346 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2381 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2347 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2382 NUM_BANKS(ADDR_SURF_16_BANK));
2348 break; 2383 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2349 case 7: 2384 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2350 case 12: 2385 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2351 case 17: 2386 NUM_BANKS(ADDR_SURF_8_BANK));
2352 case 23: 2387
2353 /* unused idx */ 2388 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2354 continue; 2389 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2355 default: 2390 reg_offset != 23)
2356 gb_tile_moden = 0; 2391 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2357 break; 2392
2358 }; 2393 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2359 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; 2394 if (reg_offset != 7)
2360 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); 2395 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2361 } 2396
2362 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2363 switch (reg_offset) {
2364 case 0:
2365 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2366 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2367 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2368 NUM_BANKS(ADDR_SURF_8_BANK));
2369 break;
2370 case 1:
2371 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2372 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2373 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2374 NUM_BANKS(ADDR_SURF_8_BANK));
2375 break;
2376 case 2:
2377 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2378 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2379 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2380 NUM_BANKS(ADDR_SURF_8_BANK));
2381 break;
2382 case 3:
2383 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2384 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2385 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2386 NUM_BANKS(ADDR_SURF_8_BANK));
2387 break;
2388 case 4:
2389 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2390 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2391 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2392 NUM_BANKS(ADDR_SURF_8_BANK));
2393 break;
2394 case 5:
2395 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2396 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2397 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2398 NUM_BANKS(ADDR_SURF_8_BANK));
2399 break;
2400 case 6:
2401 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2402 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2403 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2404 NUM_BANKS(ADDR_SURF_8_BANK));
2405 break;
2406 case 8:
2407 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2408 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2409 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2410 NUM_BANKS(ADDR_SURF_16_BANK));
2411 break;
2412 case 9:
2413 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2414 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2415 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2416 NUM_BANKS(ADDR_SURF_16_BANK));
2417 break;
2418 case 10:
2419 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2420 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2421 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2422 NUM_BANKS(ADDR_SURF_16_BANK));
2423 break;
2424 case 11:
2425 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2426 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2427 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2428 NUM_BANKS(ADDR_SURF_16_BANK));
2429 break;
2430 case 12:
2431 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2432 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2433 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2434 NUM_BANKS(ADDR_SURF_16_BANK));
2435 break;
2436 case 13:
2437 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2438 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2439 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2440 NUM_BANKS(ADDR_SURF_16_BANK));
2441 break;
2442 case 14:
2443 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2444 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2445 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2446 NUM_BANKS(ADDR_SURF_8_BANK));
2447 break;
2448 case 7:
2449 /* unused idx */
2450 continue;
2451 default:
2452 gb_tile_moden = 0;
2453 break;
2454 };
2455 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
2456 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
2457 }
2458 break; 2397 break;
2459 case CHIP_CARRIZO:
2460 default: 2398 default:
2461 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2399 dev_warn(adev->dev,
2462 switch (reg_offset) { 2400 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
2463 case 0: 2401 adev->asic_type);
2464 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2402
2465 PIPE_CONFIG(ADDR_SURF_P2) | 2403 case CHIP_CARRIZO:
2466 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2404 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2467 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2405 PIPE_CONFIG(ADDR_SURF_P2) |
2468 break; 2406 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2469 case 1: 2407 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2470 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2408 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2471 PIPE_CONFIG(ADDR_SURF_P2) | 2409 PIPE_CONFIG(ADDR_SURF_P2) |
2472 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 2410 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2473 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2411 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2474 break; 2412 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2475 case 2: 2413 PIPE_CONFIG(ADDR_SURF_P2) |
2476 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2414 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2477 PIPE_CONFIG(ADDR_SURF_P2) | 2415 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2478 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 2416 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2479 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2417 PIPE_CONFIG(ADDR_SURF_P2) |
2480 break; 2418 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2481 case 3: 2419 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2482 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2420 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2483 PIPE_CONFIG(ADDR_SURF_P2) | 2421 PIPE_CONFIG(ADDR_SURF_P2) |
2484 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 2422 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2485 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2423 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2486 break; 2424 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2487 case 4: 2425 PIPE_CONFIG(ADDR_SURF_P2) |
2488 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2426 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2489 PIPE_CONFIG(ADDR_SURF_P2) | 2427 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2490 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2428 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2491 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2429 PIPE_CONFIG(ADDR_SURF_P2) |
2492 break; 2430 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2493 case 5: 2431 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2494 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2432 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2495 PIPE_CONFIG(ADDR_SURF_P2) | 2433 PIPE_CONFIG(ADDR_SURF_P2));
2496 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2434 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2497 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2435 PIPE_CONFIG(ADDR_SURF_P2) |
2498 break; 2436 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2499 case 6: 2437 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2500 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2438 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2501 PIPE_CONFIG(ADDR_SURF_P2) | 2439 PIPE_CONFIG(ADDR_SURF_P2) |
2502 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2440 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2503 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2441 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2504 break; 2442 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2505 case 8: 2443 PIPE_CONFIG(ADDR_SURF_P2) |
2506 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 2444 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2507 PIPE_CONFIG(ADDR_SURF_P2)); 2445 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2508 break; 2446 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2509 case 9: 2447 PIPE_CONFIG(ADDR_SURF_P2) |
2510 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2448 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2511 PIPE_CONFIG(ADDR_SURF_P2) | 2449 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2512 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2450 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2513 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2451 PIPE_CONFIG(ADDR_SURF_P2) |
2514 break; 2452 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2515 case 10: 2453 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2516 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2454 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2517 PIPE_CONFIG(ADDR_SURF_P2) | 2455 PIPE_CONFIG(ADDR_SURF_P2) |
2518 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2456 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2519 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2457 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2520 break; 2458 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2521 case 11: 2459 PIPE_CONFIG(ADDR_SURF_P2) |
2522 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2460 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2523 PIPE_CONFIG(ADDR_SURF_P2) | 2461 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2524 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2462 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2525 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2463 PIPE_CONFIG(ADDR_SURF_P2) |
2526 break; 2464 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2527 case 13: 2465 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2528 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2466 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2529 PIPE_CONFIG(ADDR_SURF_P2) | 2467 PIPE_CONFIG(ADDR_SURF_P2) |
2530 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2468 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2531 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2469 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2532 break; 2470 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2533 case 14: 2471 PIPE_CONFIG(ADDR_SURF_P2) |
2534 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2472 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2535 PIPE_CONFIG(ADDR_SURF_P2) | 2473 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2536 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2474 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2537 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2475 PIPE_CONFIG(ADDR_SURF_P2) |
2538 break; 2476 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2539 case 15: 2477 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2540 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 2478 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2541 PIPE_CONFIG(ADDR_SURF_P2) | 2479 PIPE_CONFIG(ADDR_SURF_P2) |
2542 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2480 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2543 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2481 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2544 break; 2482 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2545 case 16: 2483 PIPE_CONFIG(ADDR_SURF_P2) |
2546 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2484 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2547 PIPE_CONFIG(ADDR_SURF_P2) | 2485 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2548 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2486 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2549 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2487 PIPE_CONFIG(ADDR_SURF_P2) |
2550 break; 2488 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2551 case 18: 2489 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2552 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2490 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2553 PIPE_CONFIG(ADDR_SURF_P2) | 2491 PIPE_CONFIG(ADDR_SURF_P2) |
2554 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2492 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2555 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2493 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2556 break; 2494 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2557 case 19: 2495 PIPE_CONFIG(ADDR_SURF_P2) |
2558 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2496 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2559 PIPE_CONFIG(ADDR_SURF_P2) | 2497 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2560 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2498 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2561 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2499 PIPE_CONFIG(ADDR_SURF_P2) |
2562 break; 2500 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2563 case 20: 2501 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2564 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2502 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2565 PIPE_CONFIG(ADDR_SURF_P2) | 2503 PIPE_CONFIG(ADDR_SURF_P2) |
2566 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2504 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2567 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2505 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2568 break; 2506
2569 case 21: 2507 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2570 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 2508 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2571 PIPE_CONFIG(ADDR_SURF_P2) | 2509 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2572 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2510 NUM_BANKS(ADDR_SURF_8_BANK));
2573 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2511 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2574 break; 2512 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2575 case 22: 2513 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2576 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2514 NUM_BANKS(ADDR_SURF_8_BANK));
2577 PIPE_CONFIG(ADDR_SURF_P2) | 2515 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2578 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2516 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2579 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2517 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2580 break; 2518 NUM_BANKS(ADDR_SURF_8_BANK));
2581 case 24: 2519 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2582 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2520 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2583 PIPE_CONFIG(ADDR_SURF_P2) | 2521 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2584 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2522 NUM_BANKS(ADDR_SURF_8_BANK));
2585 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2523 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2586 break; 2524 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2587 case 25: 2525 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2588 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2526 NUM_BANKS(ADDR_SURF_8_BANK));
2589 PIPE_CONFIG(ADDR_SURF_P2) | 2527 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2590 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2528 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2591 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2529 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2592 break; 2530 NUM_BANKS(ADDR_SURF_8_BANK));
2593 case 26: 2531 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2594 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2532 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2595 PIPE_CONFIG(ADDR_SURF_P2) | 2533 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2596 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2534 NUM_BANKS(ADDR_SURF_8_BANK));
2597 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2535 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2598 break; 2536 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2599 case 27: 2537 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2600 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2538 NUM_BANKS(ADDR_SURF_16_BANK));
2601 PIPE_CONFIG(ADDR_SURF_P2) | 2539 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2602 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2540 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2603 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2541 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2604 break; 2542 NUM_BANKS(ADDR_SURF_16_BANK));
2605 case 28: 2543 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2606 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2544 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2607 PIPE_CONFIG(ADDR_SURF_P2) | 2545 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2608 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2546 NUM_BANKS(ADDR_SURF_16_BANK));
2609 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2547 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2610 break; 2548 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2611 case 29: 2549 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2612 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2550 NUM_BANKS(ADDR_SURF_16_BANK));
2613 PIPE_CONFIG(ADDR_SURF_P2) | 2551 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2614 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2552 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2615 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2553 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2616 break; 2554 NUM_BANKS(ADDR_SURF_16_BANK));
2617 case 7: 2555 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2618 case 12: 2556 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2619 case 17: 2557 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2620 case 23: 2558 NUM_BANKS(ADDR_SURF_16_BANK));
2621 /* unused idx */ 2559 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2622 continue; 2560 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2623 default: 2561 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2624 gb_tile_moden = 0; 2562 NUM_BANKS(ADDR_SURF_8_BANK));
2625 break; 2563
2626 }; 2564 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2627 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; 2565 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2628 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); 2566 reg_offset != 23)
2629 } 2567 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2630 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { 2568
2631 switch (reg_offset) { 2569 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2632 case 0: 2570 if (reg_offset != 7)
2633 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2571 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2634 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2572
2635 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2573 break;
2636 NUM_BANKS(ADDR_SURF_8_BANK));
2637 break;
2638 case 1:
2639 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2640 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2641 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2642 NUM_BANKS(ADDR_SURF_8_BANK));
2643 break;
2644 case 2:
2645 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2646 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2647 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2648 NUM_BANKS(ADDR_SURF_8_BANK));
2649 break;
2650 case 3:
2651 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2652 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2653 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2654 NUM_BANKS(ADDR_SURF_8_BANK));
2655 break;
2656 case 4:
2657 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2658 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2659 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2660 NUM_BANKS(ADDR_SURF_8_BANK));
2661 break;
2662 case 5:
2663 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2664 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2665 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2666 NUM_BANKS(ADDR_SURF_8_BANK));
2667 break;
2668 case 6:
2669 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2670 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2671 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2672 NUM_BANKS(ADDR_SURF_8_BANK));
2673 break;
2674 case 8:
2675 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2676 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2677 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2678 NUM_BANKS(ADDR_SURF_16_BANK));
2679 break;
2680 case 9:
2681 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2682 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2683 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2684 NUM_BANKS(ADDR_SURF_16_BANK));
2685 break;
2686 case 10:
2687 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2688 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2689 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2690 NUM_BANKS(ADDR_SURF_16_BANK));
2691 break;
2692 case 11:
2693 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2694 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2695 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2696 NUM_BANKS(ADDR_SURF_16_BANK));
2697 break;
2698 case 12:
2699 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2700 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2701 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2702 NUM_BANKS(ADDR_SURF_16_BANK));
2703 break;
2704 case 13:
2705 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2706 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2707 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2708 NUM_BANKS(ADDR_SURF_16_BANK));
2709 break;
2710 case 14:
2711 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2712 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2713 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2714 NUM_BANKS(ADDR_SURF_8_BANK));
2715 break;
2716 case 7:
2717 /* unused idx */
2718 continue;
2719 default:
2720 gb_tile_moden = 0;
2721 break;
2722 };
2723 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
2724 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
2725 }
2726 } 2574 }
2727} 2575}
2728 2576
2729static u32 gfx_v8_0_create_bitmask(u32 bit_width) 2577static u32 gfx_v8_0_create_bitmask(u32 bit_width)
2730{ 2578{
2731 u32 i, mask = 0; 2579 return (u32)((1ULL << bit_width) - 1);
2732
2733 for (i = 0; i < bit_width; i++) {
2734 mask <<= 1;
2735 mask |= 1;
2736 }
2737 return mask;
2738} 2580}
2739 2581
2740void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) 2582void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
@@ -2809,7 +2651,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
2809 mutex_lock(&adev->grbm_idx_mutex); 2651 mutex_lock(&adev->grbm_idx_mutex);
2810 for (i = 0; i < se_num; i++) { 2652 for (i = 0; i < se_num; i++) {
2811 gfx_v8_0_select_se_sh(adev, i, 0xffffffff); 2653 gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
2812 data = 0; 2654 data = RREG32(mmPA_SC_RASTER_CONFIG);
2813 for (j = 0; j < sh_per_se; j++) { 2655 for (j = 0; j < sh_per_se; j++) {
2814 switch (enabled_rbs & 3) { 2656 switch (enabled_rbs & 3) {
2815 case 0: 2657 case 0:
@@ -2997,17 +2839,11 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2997{ 2839{
2998 u32 tmp = RREG32(mmCP_INT_CNTL_RING0); 2840 u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
2999 2841
3000 if (enable) { 2842 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3001 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1); 2843 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3002 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1); 2844 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3003 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1); 2845 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3004 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1); 2846
3005 } else {
3006 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 0);
3007 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 0);
3008 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 0);
3009 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 0);
3010 }
3011 WREG32(mmCP_INT_CNTL_RING0, tmp); 2847 WREG32(mmCP_INT_CNTL_RING0, tmp);
3012} 2848}
3013 2849
@@ -3087,16 +2923,18 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
3087 2923
3088 gfx_v8_0_rlc_reset(adev); 2924 gfx_v8_0_rlc_reset(adev);
3089 2925
3090 if (!adev->firmware.smu_load) { 2926 if (!adev->pp_enabled) {
3091 /* legacy rlc firmware loading */ 2927 if (!adev->firmware.smu_load) {
3092 r = gfx_v8_0_rlc_load_microcode(adev); 2928 /* legacy rlc firmware loading */
3093 if (r) 2929 r = gfx_v8_0_rlc_load_microcode(adev);
3094 return r; 2930 if (r)
3095 } else { 2931 return r;
3096 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 2932 } else {
3097 AMDGPU_UCODE_ID_RLC_G); 2933 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3098 if (r) 2934 AMDGPU_UCODE_ID_RLC_G);
3099 return -EINVAL; 2935 if (r)
2936 return -EINVAL;
2937 }
3100 } 2938 }
3101 2939
3102 gfx_v8_0_rlc_start(adev); 2940 gfx_v8_0_rlc_start(adev);
@@ -3941,6 +3779,11 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3941 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 3779 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3942 WREG32(mmCP_HQD_PERSISTENT_STATE, tmp); 3780 WREG32(mmCP_HQD_PERSISTENT_STATE, tmp);
3943 mqd->cp_hqd_persistent_state = tmp; 3781 mqd->cp_hqd_persistent_state = tmp;
3782 if (adev->asic_type == CHIP_STONEY) {
3783 tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
3784 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
3785 WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
3786 }
3944 3787
3945 /* activate the queue */ 3788 /* activate the queue */
3946 mqd->cp_hqd_active = 1; 3789 mqd->cp_hqd_active = 1;
@@ -3982,35 +3825,37 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
3982 if (!(adev->flags & AMD_IS_APU)) 3825 if (!(adev->flags & AMD_IS_APU))
3983 gfx_v8_0_enable_gui_idle_interrupt(adev, false); 3826 gfx_v8_0_enable_gui_idle_interrupt(adev, false);
3984 3827
3985 if (!adev->firmware.smu_load) { 3828 if (!adev->pp_enabled) {
3986 /* legacy firmware loading */ 3829 if (!adev->firmware.smu_load) {
3987 r = gfx_v8_0_cp_gfx_load_microcode(adev); 3830 /* legacy firmware loading */
3988 if (r) 3831 r = gfx_v8_0_cp_gfx_load_microcode(adev);
3989 return r; 3832 if (r)
3990 3833 return r;
3991 r = gfx_v8_0_cp_compute_load_microcode(adev);
3992 if (r)
3993 return r;
3994 } else {
3995 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3996 AMDGPU_UCODE_ID_CP_CE);
3997 if (r)
3998 return -EINVAL;
3999
4000 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
4001 AMDGPU_UCODE_ID_CP_PFP);
4002 if (r)
4003 return -EINVAL;
4004
4005 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
4006 AMDGPU_UCODE_ID_CP_ME);
4007 if (r)
4008 return -EINVAL;
4009 3834
4010 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 3835 r = gfx_v8_0_cp_compute_load_microcode(adev);
4011 AMDGPU_UCODE_ID_CP_MEC1); 3836 if (r)
4012 if (r) 3837 return r;
4013 return -EINVAL; 3838 } else {
3839 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3840 AMDGPU_UCODE_ID_CP_CE);
3841 if (r)
3842 return -EINVAL;
3843
3844 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3845 AMDGPU_UCODE_ID_CP_PFP);
3846 if (r)
3847 return -EINVAL;
3848
3849 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3850 AMDGPU_UCODE_ID_CP_ME);
3851 if (r)
3852 return -EINVAL;
3853
3854 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3855 AMDGPU_UCODE_ID_CP_MEC1);
3856 if (r)
3857 return -EINVAL;
3858 }
4014 } 3859 }
4015 3860
4016 r = gfx_v8_0_cp_gfx_resume(adev); 3861 r = gfx_v8_0_cp_gfx_resume(adev);
@@ -4458,15 +4303,261 @@ static int gfx_v8_0_early_init(void *handle)
4458 return 0; 4303 return 0;
4459} 4304}
4460 4305
4306static int gfx_v8_0_late_init(void *handle)
4307{
4308 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4309 int r;
4310
4311 /* requires IBs so do in late init after IB pool is initialized */
4312 r = gfx_v8_0_do_edc_gpr_workarounds(adev);
4313 if (r)
4314 return r;
4315
4316 return 0;
4317}
4318
4461static int gfx_v8_0_set_powergating_state(void *handle, 4319static int gfx_v8_0_set_powergating_state(void *handle,
4462 enum amd_powergating_state state) 4320 enum amd_powergating_state state)
4463{ 4321{
4464 return 0; 4322 return 0;
4465} 4323}
4466 4324
4325static void fiji_send_serdes_cmd(struct amdgpu_device *adev,
4326 uint32_t reg_addr, uint32_t cmd)
4327{
4328 uint32_t data;
4329
4330 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
4331
4332 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
4333 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
4334
4335 data = RREG32(mmRLC_SERDES_WR_CTRL);
4336 data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
4337 RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
4338 RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
4339 RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
4340 RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
4341 RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
4342 RLC_SERDES_WR_CTRL__POWER_UP_MASK |
4343 RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
4344 RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
4345 RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
4346 RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
4347 data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
4348 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
4349 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
4350 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
4351
4352 WREG32(mmRLC_SERDES_WR_CTRL, data);
4353}
4354
4355static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4356 bool enable)
4357{
4358 uint32_t temp, data;
4359
4360 /* It is disabled by HW by default */
4361 if (enable) {
4362 /* 1 - RLC memory Light sleep */
4363 temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
4364 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4365 if (temp != data)
4366 WREG32(mmRLC_MEM_SLP_CNTL, data);
4367
4368 /* 2 - CP memory Light sleep */
4369 temp = data = RREG32(mmCP_MEM_SLP_CNTL);
4370 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4371 if (temp != data)
4372 WREG32(mmCP_MEM_SLP_CNTL, data);
4373
4374 /* 3 - RLC_CGTT_MGCG_OVERRIDE */
4375 temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4376 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
4377 RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
4378 RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
4379 RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
4380
4381 if (temp != data)
4382 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
4383
4384 /* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4385 gfx_v8_0_wait_for_rlc_serdes(adev);
4386
4387 /* 5 - clear mgcg override */
4388 fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
4389
4390 /* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
4391 temp = data = RREG32(mmCGTS_SM_CTRL_REG);
4392 data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
4393 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
4394 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
4395 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
4396 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
4397 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
4398 data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
4399 if (temp != data)
4400 WREG32(mmCGTS_SM_CTRL_REG, data);
4401 udelay(50);
4402
4403 /* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4404 gfx_v8_0_wait_for_rlc_serdes(adev);
4405 } else {
4406 /* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
4407 temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4408 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
4409 RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
4410 RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
4411 RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
4412 if (temp != data)
4413 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
4414
4415 /* 2 - disable MGLS in RLC */
4416 data = RREG32(mmRLC_MEM_SLP_CNTL);
4417 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4418 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4419 WREG32(mmRLC_MEM_SLP_CNTL, data);
4420 }
4421
4422 /* 3 - disable MGLS in CP */
4423 data = RREG32(mmCP_MEM_SLP_CNTL);
4424 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4425 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4426 WREG32(mmCP_MEM_SLP_CNTL, data);
4427 }
4428
4429 /* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
4430 temp = data = RREG32(mmCGTS_SM_CTRL_REG);
4431 data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
4432 CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
4433 if (temp != data)
4434 WREG32(mmCGTS_SM_CTRL_REG, data);
4435
4436 /* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4437 gfx_v8_0_wait_for_rlc_serdes(adev);
4438
4439 /* 6 - set mgcg override */
4440 fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
4441
4442 udelay(50);
4443
4444 /* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4445 gfx_v8_0_wait_for_rlc_serdes(adev);
4446 }
4447}
4448
4449static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4450 bool enable)
4451{
4452 uint32_t temp, temp1, data, data1;
4453
4454 temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
4455
4456 if (enable) {
4457 /* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
4458 * Cmp_busy/GFX_Idle interrupts
4459 */
4460 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4461
4462 temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4463 data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
4464 if (temp1 != data1)
4465 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4466
4467 /* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4468 gfx_v8_0_wait_for_rlc_serdes(adev);
4469
4470 /* 3 - clear cgcg override */
4471 fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
4472
4473 /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4474 gfx_v8_0_wait_for_rlc_serdes(adev);
4475
4476 /* 4 - write cmd to set CGLS */
4477 fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
4478
4479 /* 5 - enable cgcg */
4480 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4481
4482 /* enable cgls*/
4483 data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4484
4485 temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4486 data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
4487
4488 if (temp1 != data1)
4489 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4490
4491 if (temp != data)
4492 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
4493 } else {
4494 /* disable cntx_empty_int_enable & GFX Idle interrupt */
4495 gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4496
4497 /* TEST CGCG */
4498 temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4499 data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
4500 RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
4501 if (temp1 != data1)
4502 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4503
4504 /* read gfx register to wake up cgcg */
4505 RREG32(mmCB_CGTT_SCLK_CTRL);
4506 RREG32(mmCB_CGTT_SCLK_CTRL);
4507 RREG32(mmCB_CGTT_SCLK_CTRL);
4508 RREG32(mmCB_CGTT_SCLK_CTRL);
4509
4510 /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4511 gfx_v8_0_wait_for_rlc_serdes(adev);
4512
4513 /* write cmd to Set CGCG Overrride */
4514 fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
4515
4516 /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4517 gfx_v8_0_wait_for_rlc_serdes(adev);
4518
4519 /* write cmd to Clear CGLS */
4520 fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
4521
4522 /* disable cgcg, cgls should be disabled too. */
4523 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
4524 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4525 if (temp != data)
4526 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
4527 }
4528}
4529static int fiji_update_gfx_clock_gating(struct amdgpu_device *adev,
4530 bool enable)
4531{
4532 if (enable) {
4533 /* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
4534 * === MGCG + MGLS + TS(CG/LS) ===
4535 */
4536 fiji_update_medium_grain_clock_gating(adev, enable);
4537 fiji_update_coarse_grain_clock_gating(adev, enable);
4538 } else {
4539 /* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
4540 * === CGCG + CGLS ===
4541 */
4542 fiji_update_coarse_grain_clock_gating(adev, enable);
4543 fiji_update_medium_grain_clock_gating(adev, enable);
4544 }
4545 return 0;
4546}
4547
4467static int gfx_v8_0_set_clockgating_state(void *handle, 4548static int gfx_v8_0_set_clockgating_state(void *handle,
4468 enum amd_clockgating_state state) 4549 enum amd_clockgating_state state)
4469{ 4550{
4551 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4552
4553 switch (adev->asic_type) {
4554 case CHIP_FIJI:
4555 fiji_update_gfx_clock_gating(adev,
4556 state == AMD_CG_STATE_GATE ? true : false);
4557 break;
4558 default:
4559 break;
4560 }
4470 return 0; 4561 return 0;
4471} 4562}
4472 4563
@@ -4627,7 +4718,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
4627 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 4718 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4628 EVENT_INDEX(5))); 4719 EVENT_INDEX(5)));
4629 amdgpu_ring_write(ring, addr & 0xfffffffc); 4720 amdgpu_ring_write(ring, addr & 0xfffffffc);
4630 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | 4721 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
4631 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 4722 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4632 amdgpu_ring_write(ring, lower_32_bits(seq)); 4723 amdgpu_ring_write(ring, lower_32_bits(seq));
4633 amdgpu_ring_write(ring, upper_32_bits(seq)); 4724 amdgpu_ring_write(ring, upper_32_bits(seq));
@@ -4995,7 +5086,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
4995 5086
4996const struct amd_ip_funcs gfx_v8_0_ip_funcs = { 5087const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
4997 .early_init = gfx_v8_0_early_init, 5088 .early_init = gfx_v8_0_early_init,
4998 .late_init = NULL, 5089 .late_init = gfx_v8_0_late_init,
4999 .sw_init = gfx_v8_0_sw_init, 5090 .sw_init = gfx_v8_0_sw_init,
5000 .sw_fini = gfx_v8_0_sw_fini, 5091 .sw_fini = gfx_v8_0_sw_fini,
5001 .hw_init = gfx_v8_0_hw_init, 5092 .hw_init = gfx_v8_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ed8abb58a785..3f956065d069 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -370,6 +370,10 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
370 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 370 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
371 adev->mc.visible_vram_size = adev->mc.aper_size; 371 adev->mc.visible_vram_size = adev->mc.aper_size;
372 372
373 /* In case the PCI BAR is larger than the actual amount of vram */
374 if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
375 adev->mc.visible_vram_size = adev->mc.real_vram_size;
376
373 /* unless the user had overridden it, set the gart 377 /* unless the user had overridden it, set the gart
374 * size equal to the 1024 or vram, whichever is larger. 378 * size equal to the 1024 or vram, whichever is larger.
375 */ 379 */
@@ -1012,7 +1016,6 @@ static int gmc_v7_0_suspend(void *handle)
1012 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1016 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1013 1017
1014 if (adev->vm_manager.enabled) { 1018 if (adev->vm_manager.enabled) {
1015 amdgpu_vm_manager_fini(adev);
1016 gmc_v7_0_vm_fini(adev); 1019 gmc_v7_0_vm_fini(adev);
1017 adev->vm_manager.enabled = false; 1020 adev->vm_manager.enabled = false;
1018 } 1021 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d39028440814..c0c9a0101eb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -476,6 +476,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
476 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 476 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
477 adev->mc.visible_vram_size = adev->mc.aper_size; 477 adev->mc.visible_vram_size = adev->mc.aper_size;
478 478
479 /* In case the PCI BAR is larger than the actual amount of vram */
480 if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
481 adev->mc.visible_vram_size = adev->mc.real_vram_size;
482
479 /* unless the user had overridden it, set the gart 483 /* unless the user had overridden it, set the gart
480 * size equal to the 1024 or vram, whichever is larger. 484 * size equal to the 1024 or vram, whichever is larger.
481 */ 485 */
@@ -1033,7 +1037,6 @@ static int gmc_v8_0_suspend(void *handle)
1033 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1037 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1034 1038
1035 if (adev->vm_manager.enabled) { 1039 if (adev->vm_manager.enabled) {
1036 amdgpu_vm_manager_fini(adev);
1037 gmc_v8_0_vm_fini(adev); 1040 gmc_v8_0_vm_fini(adev);
1038 adev->vm_manager.enabled = false; 1041 adev->vm_manager.enabled = false;
1039 } 1042 }
@@ -1324,9 +1327,181 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1324 return 0; 1327 return 0;
1325} 1328}
1326 1329
1330static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1331 bool enable)
1332{
1333 uint32_t data;
1334
1335 if (enable) {
1336 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1337 data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1338 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1339
1340 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1341 data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1342 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1343
1344 data = RREG32(mmMC_HUB_MISC_VM_CG);
1345 data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1346 WREG32(mmMC_HUB_MISC_VM_CG, data);
1347
1348 data = RREG32(mmMC_XPB_CLK_GAT);
1349 data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1350 WREG32(mmMC_XPB_CLK_GAT, data);
1351
1352 data = RREG32(mmATC_MISC_CG);
1353 data |= ATC_MISC_CG__ENABLE_MASK;
1354 WREG32(mmATC_MISC_CG, data);
1355
1356 data = RREG32(mmMC_CITF_MISC_WR_CG);
1357 data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1358 WREG32(mmMC_CITF_MISC_WR_CG, data);
1359
1360 data = RREG32(mmMC_CITF_MISC_RD_CG);
1361 data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1362 WREG32(mmMC_CITF_MISC_RD_CG, data);
1363
1364 data = RREG32(mmMC_CITF_MISC_VM_CG);
1365 data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1366 WREG32(mmMC_CITF_MISC_VM_CG, data);
1367
1368 data = RREG32(mmVM_L2_CG);
1369 data |= VM_L2_CG__ENABLE_MASK;
1370 WREG32(mmVM_L2_CG, data);
1371 } else {
1372 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1373 data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1374 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1375
1376 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1377 data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1378 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1379
1380 data = RREG32(mmMC_HUB_MISC_VM_CG);
1381 data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1382 WREG32(mmMC_HUB_MISC_VM_CG, data);
1383
1384 data = RREG32(mmMC_XPB_CLK_GAT);
1385 data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1386 WREG32(mmMC_XPB_CLK_GAT, data);
1387
1388 data = RREG32(mmATC_MISC_CG);
1389 data &= ~ATC_MISC_CG__ENABLE_MASK;
1390 WREG32(mmATC_MISC_CG, data);
1391
1392 data = RREG32(mmMC_CITF_MISC_WR_CG);
1393 data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1394 WREG32(mmMC_CITF_MISC_WR_CG, data);
1395
1396 data = RREG32(mmMC_CITF_MISC_RD_CG);
1397 data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1398 WREG32(mmMC_CITF_MISC_RD_CG, data);
1399
1400 data = RREG32(mmMC_CITF_MISC_VM_CG);
1401 data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1402 WREG32(mmMC_CITF_MISC_VM_CG, data);
1403
1404 data = RREG32(mmVM_L2_CG);
1405 data &= ~VM_L2_CG__ENABLE_MASK;
1406 WREG32(mmVM_L2_CG, data);
1407 }
1408}
1409
1410static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1411 bool enable)
1412{
1413 uint32_t data;
1414
1415 if (enable) {
1416 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1417 data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1418 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1419
1420 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1421 data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1422 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1423
1424 data = RREG32(mmMC_HUB_MISC_VM_CG);
1425 data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1426 WREG32(mmMC_HUB_MISC_VM_CG, data);
1427
1428 data = RREG32(mmMC_XPB_CLK_GAT);
1429 data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1430 WREG32(mmMC_XPB_CLK_GAT, data);
1431
1432 data = RREG32(mmATC_MISC_CG);
1433 data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1434 WREG32(mmATC_MISC_CG, data);
1435
1436 data = RREG32(mmMC_CITF_MISC_WR_CG);
1437 data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1438 WREG32(mmMC_CITF_MISC_WR_CG, data);
1439
1440 data = RREG32(mmMC_CITF_MISC_RD_CG);
1441 data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1442 WREG32(mmMC_CITF_MISC_RD_CG, data);
1443
1444 data = RREG32(mmMC_CITF_MISC_VM_CG);
1445 data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1446 WREG32(mmMC_CITF_MISC_VM_CG, data);
1447
1448 data = RREG32(mmVM_L2_CG);
1449 data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1450 WREG32(mmVM_L2_CG, data);
1451 } else {
1452 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1453 data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1454 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1455
1456 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1457 data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1458 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1459
1460 data = RREG32(mmMC_HUB_MISC_VM_CG);
1461 data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1462 WREG32(mmMC_HUB_MISC_VM_CG, data);
1463
1464 data = RREG32(mmMC_XPB_CLK_GAT);
1465 data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1466 WREG32(mmMC_XPB_CLK_GAT, data);
1467
1468 data = RREG32(mmATC_MISC_CG);
1469 data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1470 WREG32(mmATC_MISC_CG, data);
1471
1472 data = RREG32(mmMC_CITF_MISC_WR_CG);
1473 data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1474 WREG32(mmMC_CITF_MISC_WR_CG, data);
1475
1476 data = RREG32(mmMC_CITF_MISC_RD_CG);
1477 data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1478 WREG32(mmMC_CITF_MISC_RD_CG, data);
1479
1480 data = RREG32(mmMC_CITF_MISC_VM_CG);
1481 data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1482 WREG32(mmMC_CITF_MISC_VM_CG, data);
1483
1484 data = RREG32(mmVM_L2_CG);
1485 data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1486 WREG32(mmVM_L2_CG, data);
1487 }
1488}
1489
1327static int gmc_v8_0_set_clockgating_state(void *handle, 1490static int gmc_v8_0_set_clockgating_state(void *handle,
1328 enum amd_clockgating_state state) 1491 enum amd_clockgating_state state)
1329{ 1492{
1493 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1494
1495 switch (adev->asic_type) {
1496 case CHIP_FIJI:
1497 fiji_update_mc_medium_grain_clock_gating(adev,
1498 state == AMD_CG_STATE_GATE ? true : false);
1499 fiji_update_mc_light_sleep(adev,
1500 state == AMD_CG_STATE_GATE ? true : false);
1501 break;
1502 default:
1503 break;
1504 }
1330 return 0; 1505 return 0;
1331} 1506}
1332 1507
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 779532d350ff..679e7394a495 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -253,8 +253,14 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev)
253static int iceland_ih_early_init(void *handle) 253static int iceland_ih_early_init(void *handle)
254{ 254{
255 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 255 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
256 int ret;
257
258 ret = amdgpu_irq_add_domain(adev);
259 if (ret)
260 return ret;
256 261
257 iceland_ih_set_interrupt_funcs(adev); 262 iceland_ih_set_interrupt_funcs(adev);
263
258 return 0; 264 return 0;
259} 265}
260 266
@@ -278,6 +284,7 @@ static int iceland_ih_sw_fini(void *handle)
278 284
279 amdgpu_irq_fini(adev); 285 amdgpu_irq_fini(adev);
280 amdgpu_ih_ring_fini(adev); 286 amdgpu_ih_ring_fini(adev);
287 amdgpu_irq_remove_domain(adev);
281 288
282 return 0; 289 return 0;
283} 290}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7253132f04b8..ad54c46751b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -727,18 +727,20 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
727{ 727{
728 int r, i; 728 int r, i;
729 729
730 if (!adev->firmware.smu_load) { 730 if (!adev->pp_enabled) {
731 r = sdma_v3_0_load_microcode(adev); 731 if (!adev->firmware.smu_load) {
732 if (r) 732 r = sdma_v3_0_load_microcode(adev);
733 return r;
734 } else {
735 for (i = 0; i < adev->sdma.num_instances; i++) {
736 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
737 (i == 0) ?
738 AMDGPU_UCODE_ID_SDMA0 :
739 AMDGPU_UCODE_ID_SDMA1);
740 if (r) 733 if (r)
741 return -EINVAL; 734 return r;
735 } else {
736 for (i = 0; i < adev->sdma.num_instances; i++) {
737 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
738 (i == 0) ?
739 AMDGPU_UCODE_ID_SDMA0 :
740 AMDGPU_UCODE_ID_SDMA1);
741 if (r)
742 return -EINVAL;
743 }
742 } 744 }
743 } 745 }
744 746
@@ -1427,9 +1429,114 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1427 return 0; 1429 return 0;
1428} 1430}
1429 1431
1432static void fiji_update_sdma_medium_grain_clock_gating(
1433 struct amdgpu_device *adev,
1434 bool enable)
1435{
1436 uint32_t temp, data;
1437
1438 if (enable) {
1439 temp = data = RREG32(mmSDMA0_CLK_CTRL);
1440 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1441 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1442 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1443 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1444 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1445 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1446 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1447 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1448 if (data != temp)
1449 WREG32(mmSDMA0_CLK_CTRL, data);
1450
1451 temp = data = RREG32(mmSDMA1_CLK_CTRL);
1452 data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1453 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1454 SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1455 SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1456 SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1457 SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1458 SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1459 SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1460
1461 if (data != temp)
1462 WREG32(mmSDMA1_CLK_CTRL, data);
1463 } else {
1464 temp = data = RREG32(mmSDMA0_CLK_CTRL);
1465 data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1466 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1467 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1468 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1469 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1470 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1471 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1472 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
1473
1474 if (data != temp)
1475 WREG32(mmSDMA0_CLK_CTRL, data);
1476
1477 temp = data = RREG32(mmSDMA1_CLK_CTRL);
1478 data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1479 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1480 SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1481 SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1482 SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1483 SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1484 SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1485 SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;
1486
1487 if (data != temp)
1488 WREG32(mmSDMA1_CLK_CTRL, data);
1489 }
1490}
1491
1492static void fiji_update_sdma_medium_grain_light_sleep(
1493 struct amdgpu_device *adev,
1494 bool enable)
1495{
1496 uint32_t temp, data;
1497
1498 if (enable) {
1499 temp = data = RREG32(mmSDMA0_POWER_CNTL);
1500 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1501
1502 if (temp != data)
1503 WREG32(mmSDMA0_POWER_CNTL, data);
1504
1505 temp = data = RREG32(mmSDMA1_POWER_CNTL);
1506 data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1507
1508 if (temp != data)
1509 WREG32(mmSDMA1_POWER_CNTL, data);
1510 } else {
1511 temp = data = RREG32(mmSDMA0_POWER_CNTL);
1512 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1513
1514 if (temp != data)
1515 WREG32(mmSDMA0_POWER_CNTL, data);
1516
1517 temp = data = RREG32(mmSDMA1_POWER_CNTL);
1518 data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1519
1520 if (temp != data)
1521 WREG32(mmSDMA1_POWER_CNTL, data);
1522 }
1523}
1524
1430static int sdma_v3_0_set_clockgating_state(void *handle, 1525static int sdma_v3_0_set_clockgating_state(void *handle,
1431 enum amd_clockgating_state state) 1526 enum amd_clockgating_state state)
1432{ 1527{
1528 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1529
1530 switch (adev->asic_type) {
1531 case CHIP_FIJI:
1532 fiji_update_sdma_medium_grain_clock_gating(adev,
1533 state == AMD_CG_STATE_GATE ? true : false);
1534 fiji_update_sdma_medium_grain_light_sleep(adev,
1535 state == AMD_CG_STATE_GATE ? true : false);
1536 break;
1537 default:
1538 break;
1539 }
1433 return 0; 1540 return 0;
1434} 1541}
1435 1542
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index 204903897b4f..f4a1346525fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -24,7 +24,7 @@
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include "drmP.h" 25#include "drmP.h"
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "tonga_smumgr.h" 27#include "tonga_smum.h"
28 28
29MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); 29MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
30 30
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 743c372837aa..b6f7d7bff929 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -273,8 +273,14 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev)
273static int tonga_ih_early_init(void *handle) 273static int tonga_ih_early_init(void *handle)
274{ 274{
275 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 275 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
276 int ret;
277
278 ret = amdgpu_irq_add_domain(adev);
279 if (ret)
280 return ret;
276 281
277 tonga_ih_set_interrupt_funcs(adev); 282 tonga_ih_set_interrupt_funcs(adev);
283
278 return 0; 284 return 0;
279} 285}
280 286
@@ -301,6 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
301 307
302 amdgpu_irq_fini(adev); 308 amdgpu_irq_fini(adev);
303 amdgpu_ih_ring_fini(adev); 309 amdgpu_ih_ring_fini(adev);
310 amdgpu_irq_add_domain(adev);
304 311
305 return 0; 312 return 0;
306} 313}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h
deleted file mode 100644
index 811781f69482..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h
+++ /dev/null
@@ -1,198 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_PP_SMC_H
25#define TONGA_PP_SMC_H
26
27#pragma pack(push, 1)
28
29#define PPSMC_SWSTATE_FLAG_DC 0x01
30#define PPSMC_SWSTATE_FLAG_UVD 0x02
31#define PPSMC_SWSTATE_FLAG_VCE 0x04
32#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08
33
34#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
35#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
36#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
37
38#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
39#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
40#define PPSMC_SYSTEMFLAG_GDDR5 0x04
41
42#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
43
44#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
45#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
46#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40
47
48#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
49#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
50
51#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
52#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
53
54#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10
55#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20
56#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40
57
58#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
59#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
60#define PPSMC_DPM2FLAGS_OCP 0x04
61
62#define PPSMC_DISPLAY_WATERMARK_LOW 0
63#define PPSMC_DISPLAY_WATERMARK_HIGH 1
64
65#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
66#define PPSMC_STATEFLAG_POWERBOOST 0x02
67#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
68#define PPSMC_STATEFLAG_POWERSHIFT 0x08
69#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
70#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
71#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
72
73#define FDO_MODE_HARDWARE 0
74#define FDO_MODE_PIECE_WISE_LINEAR 1
75
76enum FAN_CONTROL {
77 FAN_CONTROL_FUZZY,
78 FAN_CONTROL_TABLE
79};
80
81#define PPSMC_Result_OK ((uint16_t)0x01)
82#define PPSMC_Result_NoMore ((uint16_t)0x02)
83#define PPSMC_Result_NotNow ((uint16_t)0x03)
84#define PPSMC_Result_Failed ((uint16_t)0xFF)
85#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
86#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
87
88typedef uint16_t PPSMC_Result;
89
90#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
91
92#define PPSMC_MSG_Halt ((uint16_t)0x10)
93#define PPSMC_MSG_Resume ((uint16_t)0x11)
94#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
95#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
96#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
97#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
98#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
99#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
100#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
101#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
102#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
103#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
104#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
105#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
106#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
107#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
108#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
109#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
110#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
111#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
112#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
113#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
114#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
115#define PPSMC_CACHistoryStart ((uint16_t)0x57)
116#define PPSMC_CACHistoryStop ((uint16_t)0x58)
117#define PPSMC_TDPClampingActive ((uint16_t)0x59)
118#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
119#define PPSMC_StartFanControl ((uint16_t)0x5B)
120#define PPSMC_StopFanControl ((uint16_t)0x5C)
121#define PPSMC_NoDisplay ((uint16_t)0x5D)
122#define PPSMC_HasDisplay ((uint16_t)0x5E)
123#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
124#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
125#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
126#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
127#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
128#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
129#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
130#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
131#define PPSMC_OCPActive ((uint16_t)0x6C)
132#define PPSMC_OCPInactive ((uint16_t)0x6D)
133#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
134#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
135#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
136#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
137#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
138#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
139#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
140#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
141#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
142#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
143#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
144#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
145#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
146#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
147#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
148#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
149#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
150#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
151#define PPSMC_FlushDataCache ((uint16_t)0x80)
152#define PPSMC_FlushInstrCache ((uint16_t)0x81)
153#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
154#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
155#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
156#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
157#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
158#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
159#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
160#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
161#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
162#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
163#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
164#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90)
165#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91)
166#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92)
167#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93)
168#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94)
169#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95)
170#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96)
171#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97)
172#define PPSMC_MSG_GPIO17 ((uint16_t)0x98)
173#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99)
174#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A)
175#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B)
176#define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C)
177#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D)
178#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E)
179
180#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
181
182#define PPSMC_MSG_Test ((uint16_t)0x100)
183#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250)
184#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251)
185#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252)
186#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253)
187#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254)
188
189typedef uint16_t PPSMC_Msg;
190
191#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
192#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
193#define PPSMC_EVENT_STATUS_DC 0x00000004
194#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
195
196#pragma pack(pop)
197
198#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 5421309c1862..361c49a82323 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -25,7 +25,7 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "tonga_ppsmc.h" 27#include "tonga_ppsmc.h"
28#include "tonga_smumgr.h" 28#include "tonga_smum.h"
29#include "smu_ucode_xfer_vi.h" 29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h" 30#include "amdgpu_ucode.h"
31 31
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h b/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
index c031ff99fe3e..c031ff99fe3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 121915bbc3b6..3d5913926436 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -279,6 +279,234 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
279 WREG32(mmUVD_VCPU_CACHE_SIZE2, size); 279 WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
280} 280}
281 281
282static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
283 bool enable)
284{
285 u32 data, data1;
286
287 data = RREG32(mmUVD_CGC_GATE);
288 data1 = RREG32(mmUVD_SUVD_CGC_GATE);
289 if (enable) {
290 data |= UVD_CGC_GATE__SYS_MASK |
291 UVD_CGC_GATE__UDEC_MASK |
292 UVD_CGC_GATE__MPEG2_MASK |
293 UVD_CGC_GATE__RBC_MASK |
294 UVD_CGC_GATE__LMI_MC_MASK |
295 UVD_CGC_GATE__IDCT_MASK |
296 UVD_CGC_GATE__MPRD_MASK |
297 UVD_CGC_GATE__MPC_MASK |
298 UVD_CGC_GATE__LBSI_MASK |
299 UVD_CGC_GATE__LRBBM_MASK |
300 UVD_CGC_GATE__UDEC_RE_MASK |
301 UVD_CGC_GATE__UDEC_CM_MASK |
302 UVD_CGC_GATE__UDEC_IT_MASK |
303 UVD_CGC_GATE__UDEC_DB_MASK |
304 UVD_CGC_GATE__UDEC_MP_MASK |
305 UVD_CGC_GATE__WCB_MASK |
306 UVD_CGC_GATE__VCPU_MASK |
307 UVD_CGC_GATE__SCPU_MASK;
308 data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
309 UVD_SUVD_CGC_GATE__SIT_MASK |
310 UVD_SUVD_CGC_GATE__SMP_MASK |
311 UVD_SUVD_CGC_GATE__SCM_MASK |
312 UVD_SUVD_CGC_GATE__SDB_MASK |
313 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
314 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
315 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
316 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
317 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
318 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
319 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
320 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
321 } else {
322 data &= ~(UVD_CGC_GATE__SYS_MASK |
323 UVD_CGC_GATE__UDEC_MASK |
324 UVD_CGC_GATE__MPEG2_MASK |
325 UVD_CGC_GATE__RBC_MASK |
326 UVD_CGC_GATE__LMI_MC_MASK |
327 UVD_CGC_GATE__LMI_UMC_MASK |
328 UVD_CGC_GATE__IDCT_MASK |
329 UVD_CGC_GATE__MPRD_MASK |
330 UVD_CGC_GATE__MPC_MASK |
331 UVD_CGC_GATE__LBSI_MASK |
332 UVD_CGC_GATE__LRBBM_MASK |
333 UVD_CGC_GATE__UDEC_RE_MASK |
334 UVD_CGC_GATE__UDEC_CM_MASK |
335 UVD_CGC_GATE__UDEC_IT_MASK |
336 UVD_CGC_GATE__UDEC_DB_MASK |
337 UVD_CGC_GATE__UDEC_MP_MASK |
338 UVD_CGC_GATE__WCB_MASK |
339 UVD_CGC_GATE__VCPU_MASK |
340 UVD_CGC_GATE__SCPU_MASK);
341 data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
342 UVD_SUVD_CGC_GATE__SIT_MASK |
343 UVD_SUVD_CGC_GATE__SMP_MASK |
344 UVD_SUVD_CGC_GATE__SCM_MASK |
345 UVD_SUVD_CGC_GATE__SDB_MASK |
346 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
347 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
348 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
349 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
350 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
351 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
352 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
353 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
354 }
355 WREG32(mmUVD_CGC_GATE, data);
356 WREG32(mmUVD_SUVD_CGC_GATE, data1);
357}
358
359static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
360 bool enable)
361{
362 u32 data, data1;
363
364 data = RREG32(mmUVD_CGC_GATE);
365 data1 = RREG32(mmUVD_SUVD_CGC_GATE);
366 if (enable) {
367 data |= UVD_CGC_GATE__SYS_MASK |
368 UVD_CGC_GATE__UDEC_MASK |
369 UVD_CGC_GATE__MPEG2_MASK |
370 UVD_CGC_GATE__RBC_MASK |
371 UVD_CGC_GATE__LMI_MC_MASK |
372 UVD_CGC_GATE__IDCT_MASK |
373 UVD_CGC_GATE__MPRD_MASK |
374 UVD_CGC_GATE__MPC_MASK |
375 UVD_CGC_GATE__LBSI_MASK |
376 UVD_CGC_GATE__LRBBM_MASK |
377 UVD_CGC_GATE__UDEC_RE_MASK |
378 UVD_CGC_GATE__UDEC_CM_MASK |
379 UVD_CGC_GATE__UDEC_IT_MASK |
380 UVD_CGC_GATE__UDEC_DB_MASK |
381 UVD_CGC_GATE__UDEC_MP_MASK |
382 UVD_CGC_GATE__WCB_MASK |
383 UVD_CGC_GATE__VCPU_MASK |
384 UVD_CGC_GATE__SCPU_MASK;
385 data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
386 UVD_SUVD_CGC_GATE__SIT_MASK |
387 UVD_SUVD_CGC_GATE__SMP_MASK |
388 UVD_SUVD_CGC_GATE__SCM_MASK |
389 UVD_SUVD_CGC_GATE__SDB_MASK;
390 } else {
391 data &= ~(UVD_CGC_GATE__SYS_MASK |
392 UVD_CGC_GATE__UDEC_MASK |
393 UVD_CGC_GATE__MPEG2_MASK |
394 UVD_CGC_GATE__RBC_MASK |
395 UVD_CGC_GATE__LMI_MC_MASK |
396 UVD_CGC_GATE__LMI_UMC_MASK |
397 UVD_CGC_GATE__IDCT_MASK |
398 UVD_CGC_GATE__MPRD_MASK |
399 UVD_CGC_GATE__MPC_MASK |
400 UVD_CGC_GATE__LBSI_MASK |
401 UVD_CGC_GATE__LRBBM_MASK |
402 UVD_CGC_GATE__UDEC_RE_MASK |
403 UVD_CGC_GATE__UDEC_CM_MASK |
404 UVD_CGC_GATE__UDEC_IT_MASK |
405 UVD_CGC_GATE__UDEC_DB_MASK |
406 UVD_CGC_GATE__UDEC_MP_MASK |
407 UVD_CGC_GATE__WCB_MASK |
408 UVD_CGC_GATE__VCPU_MASK |
409 UVD_CGC_GATE__SCPU_MASK);
410 data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
411 UVD_SUVD_CGC_GATE__SIT_MASK |
412 UVD_SUVD_CGC_GATE__SMP_MASK |
413 UVD_SUVD_CGC_GATE__SCM_MASK |
414 UVD_SUVD_CGC_GATE__SDB_MASK);
415 }
416 WREG32(mmUVD_CGC_GATE, data);
417 WREG32(mmUVD_SUVD_CGC_GATE, data1);
418}
419
420static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev,
421 bool swmode)
422{
423 u32 data, data1 = 0, data2;
424
425 /* Always un-gate UVD REGS bit */
426 data = RREG32(mmUVD_CGC_GATE);
427 data &= ~(UVD_CGC_GATE__REGS_MASK);
428 WREG32(mmUVD_CGC_GATE, data);
429
430 data = RREG32(mmUVD_CGC_CTRL);
431 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
432 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
433 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
434 1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) |
435 4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY);
436
437 data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
438 if (swmode) {
439 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
440 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
441 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
442 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
443 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
444 UVD_CGC_CTRL__SYS_MODE_MASK |
445 UVD_CGC_CTRL__UDEC_MODE_MASK |
446 UVD_CGC_CTRL__MPEG2_MODE_MASK |
447 UVD_CGC_CTRL__REGS_MODE_MASK |
448 UVD_CGC_CTRL__RBC_MODE_MASK |
449 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
450 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
451 UVD_CGC_CTRL__IDCT_MODE_MASK |
452 UVD_CGC_CTRL__MPRD_MODE_MASK |
453 UVD_CGC_CTRL__MPC_MODE_MASK |
454 UVD_CGC_CTRL__LBSI_MODE_MASK |
455 UVD_CGC_CTRL__LRBBM_MODE_MASK |
456 UVD_CGC_CTRL__WCB_MODE_MASK |
457 UVD_CGC_CTRL__VCPU_MODE_MASK |
458 UVD_CGC_CTRL__JPEG_MODE_MASK |
459 UVD_CGC_CTRL__SCPU_MODE_MASK);
460 data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
461 UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK;
462 data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK;
463 data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID);
464 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
465 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
466 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
467 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
468 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
469 } else {
470 data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
471 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
472 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
473 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
474 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
475 UVD_CGC_CTRL__SYS_MODE_MASK |
476 UVD_CGC_CTRL__UDEC_MODE_MASK |
477 UVD_CGC_CTRL__MPEG2_MODE_MASK |
478 UVD_CGC_CTRL__REGS_MODE_MASK |
479 UVD_CGC_CTRL__RBC_MODE_MASK |
480 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
481 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
482 UVD_CGC_CTRL__IDCT_MODE_MASK |
483 UVD_CGC_CTRL__MPRD_MODE_MASK |
484 UVD_CGC_CTRL__MPC_MODE_MASK |
485 UVD_CGC_CTRL__LBSI_MODE_MASK |
486 UVD_CGC_CTRL__LRBBM_MODE_MASK |
487 UVD_CGC_CTRL__WCB_MODE_MASK |
488 UVD_CGC_CTRL__VCPU_MODE_MASK |
489 UVD_CGC_CTRL__SCPU_MODE_MASK;
490 data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
491 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
492 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
493 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
494 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK;
495 }
496 WREG32(mmUVD_CGC_CTRL, data);
497 WREG32(mmUVD_SUVD_CGC_CTRL, data2);
498
499 data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2);
500 data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
501 REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
502 REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
503 data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
504 REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
505 REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
506 data |= data1;
507 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data);
508}
509
282/** 510/**
283 * uvd_v6_0_start - start UVD block 511 * uvd_v6_0_start - start UVD block
284 * 512 *
@@ -303,8 +531,19 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
303 531
304 uvd_v6_0_mc_resume(adev); 532 uvd_v6_0_mc_resume(adev);
305 533
306 /* disable clock gating */ 534 /* Set dynamic clock gating in S/W control mode */
307 WREG32(mmUVD_CGC_GATE, 0); 535 if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) {
536 if (adev->flags & AMD_IS_APU)
537 cz_set_uvd_clock_gating_branches(adev, false);
538 else
539 tonga_set_uvd_clock_gating_branches(adev, false);
540 uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
541 } else {
542 /* disable clock gating */
543 uint32_t data = RREG32(mmUVD_CGC_CTRL);
544 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
545 WREG32(mmUVD_CGC_CTRL, data);
546 }
308 547
309 /* disable interupt */ 548 /* disable interupt */
310 WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); 549 WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -758,6 +997,24 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
758static int uvd_v6_0_set_clockgating_state(void *handle, 997static int uvd_v6_0_set_clockgating_state(void *handle,
759 enum amd_clockgating_state state) 998 enum amd_clockgating_state state)
760{ 999{
1000 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1001 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1002
1003 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG))
1004 return 0;
1005
1006 if (enable) {
1007 if (adev->flags & AMD_IS_APU)
1008 cz_set_uvd_clock_gating_branches(adev, enable);
1009 else
1010 tonga_set_uvd_clock_gating_branches(adev, enable);
1011 uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
1012 } else {
1013 uint32_t data = RREG32(mmUVD_CGC_CTRL);
1014 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1015 WREG32(mmUVD_CGC_CTRL, data);
1016 }
1017
761 return 0; 1018 return 0;
762} 1019}
763 1020
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 370c6c9d81c2..e99af81e4aec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -103,6 +103,108 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
103 WREG32(mmVCE_RB_WPTR2, ring->wptr); 103 WREG32(mmVCE_RB_WPTR2, ring->wptr);
104} 104}
105 105
106static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
107{
108 u32 tmp, data;
109
110 tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
111 if (override)
112 data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
113 else
114 data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
115
116 if (tmp != data)
117 WREG32(mmVCE_RB_ARB_CTRL, data);
118}
119
120static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
121 bool gated)
122{
123 u32 tmp, data;
124 /* Set Override to disable Clock Gating */
125 vce_v3_0_override_vce_clock_gating(adev, true);
126
127 if (!gated) {
128 /* Force CLOCK ON for VCE_CLOCK_GATING_B,
129 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
130 * VREG can be FORCE ON or set to Dynamic, but can't be OFF
131 */
132 tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
133 data |= 0x1ff;
134 data &= ~0xef0000;
135 if (tmp != data)
136 WREG32(mmVCE_CLOCK_GATING_B, data);
137
138 /* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
139 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
140 */
141 tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
142 data |= 0x3ff000;
143 data &= ~0xffc00000;
144 if (tmp != data)
145 WREG32(mmVCE_UENC_CLOCK_GATING, data);
146
147 /* set VCE_UENC_CLOCK_GATING_2 */
148 tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
149 data |= 0x2;
150 data &= ~0x2;
151 if (tmp != data)
152 WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
153
154 /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
155 tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
156 data |= 0x37f;
157 if (tmp != data)
158 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
159
160 /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
161 tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
162 data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
163 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
164 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
165 0x8;
166 if (tmp != data)
167 WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
168 } else {
169 /* Force CLOCK OFF for VCE_CLOCK_GATING_B,
170 * {*, *_FORCE_OFF} = {*, 1}
171 * set VREG to Dynamic, as it can't be OFF
172 */
173 tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
174 data &= ~0x80010;
175 data |= 0xe70008;
176 if (tmp != data)
177 WREG32(mmVCE_CLOCK_GATING_B, data);
178 /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
179 * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
180 * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
181 */
182 tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
183 data |= 0xffc00000;
184 if (tmp != data)
185 WREG32(mmVCE_UENC_CLOCK_GATING, data);
186 /* Set VCE_UENC_CLOCK_GATING_2 */
187 tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
188 data |= 0x10000;
189 if (tmp != data)
190 WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
191 /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
192 tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
193 data &= ~0xffc00000;
194 if (tmp != data)
195 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
196 /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
197 tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
198 data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
199 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
200 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
201 0x8);
202 if (tmp != data)
203 WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
204 }
205 vce_v3_0_override_vce_clock_gating(adev, false);
206}
207
106/** 208/**
107 * vce_v3_0_start - start VCE block 209 * vce_v3_0_start - start VCE block
108 * 210 *
@@ -121,7 +223,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
121 if (adev->vce.harvest_config & (1 << idx)) 223 if (adev->vce.harvest_config & (1 << idx))
122 continue; 224 continue;
123 225
124 if(idx == 0) 226 if (idx == 0)
125 WREG32_P(mmGRBM_GFX_INDEX, 0, 227 WREG32_P(mmGRBM_GFX_INDEX, 0,
126 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 228 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
127 else 229 else
@@ -174,6 +276,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
174 /* clear BUSY flag */ 276 /* clear BUSY flag */
175 WREG32_P(mmVCE_STATUS, 0, ~1); 277 WREG32_P(mmVCE_STATUS, 0, ~1);
176 278
279 /* Set Clock-Gating off */
280 if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)
281 vce_v3_0_set_vce_sw_clock_gating(adev, false);
282
177 if (r) { 283 if (r) {
178 DRM_ERROR("VCE not responding, giving up!!!\n"); 284 DRM_ERROR("VCE not responding, giving up!!!\n");
179 mutex_unlock(&adev->grbm_idx_mutex); 285 mutex_unlock(&adev->grbm_idx_mutex);
@@ -208,14 +314,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
208static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) 314static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
209{ 315{
210 u32 tmp; 316 u32 tmp;
211 unsigned ret;
212 317
213 /* Fiji, Stoney are single pipe */ 318 /* Fiji, Stoney are single pipe */
214 if ((adev->asic_type == CHIP_FIJI) || 319 if ((adev->asic_type == CHIP_FIJI) ||
215 (adev->asic_type == CHIP_STONEY)){ 320 (adev->asic_type == CHIP_STONEY))
216 ret = AMDGPU_VCE_HARVEST_VCE1; 321 return AMDGPU_VCE_HARVEST_VCE1;
217 return ret;
218 }
219 322
220 /* Tonga and CZ are dual or single pipe */ 323 /* Tonga and CZ are dual or single pipe */
221 if (adev->flags & AMD_IS_APU) 324 if (adev->flags & AMD_IS_APU)
@@ -229,19 +332,14 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
229 332
230 switch (tmp) { 333 switch (tmp) {
231 case 1: 334 case 1:
232 ret = AMDGPU_VCE_HARVEST_VCE0; 335 return AMDGPU_VCE_HARVEST_VCE0;
233 break;
234 case 2: 336 case 2:
235 ret = AMDGPU_VCE_HARVEST_VCE1; 337 return AMDGPU_VCE_HARVEST_VCE1;
236 break;
237 case 3: 338 case 3:
238 ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; 339 return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
239 break;
240 default: 340 default:
241 ret = 0; 341 return 0;
242 } 342 }
243
244 return ret;
245} 343}
246 344
247static int vce_v3_0_early_init(void *handle) 345static int vce_v3_0_early_init(void *handle)
@@ -316,28 +414,22 @@ static int vce_v3_0_sw_fini(void *handle)
316 414
317static int vce_v3_0_hw_init(void *handle) 415static int vce_v3_0_hw_init(void *handle)
318{ 416{
319 struct amdgpu_ring *ring; 417 int r, i;
320 int r;
321 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 418 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
322 419
323 r = vce_v3_0_start(adev); 420 r = vce_v3_0_start(adev);
324 if (r) 421 if (r)
325 return r; 422 return r;
326 423
327 ring = &adev->vce.ring[0]; 424 adev->vce.ring[0].ready = false;
328 ring->ready = true; 425 adev->vce.ring[1].ready = false;
329 r = amdgpu_ring_test_ring(ring);
330 if (r) {
331 ring->ready = false;
332 return r;
333 }
334 426
335 ring = &adev->vce.ring[1]; 427 for (i = 0; i < 2; i++) {
336 ring->ready = true; 428 r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
337 r = amdgpu_ring_test_ring(ring); 429 if (r)
338 if (r) { 430 return r;
339 ring->ready = false; 431 else
340 return r; 432 adev->vce.ring[i].ready = true;
341 } 433 }
342 434
343 DRM_INFO("VCE initialized successfully.\n"); 435 DRM_INFO("VCE initialized successfully.\n");
@@ -437,17 +529,9 @@ static bool vce_v3_0_is_idle(void *handle)
437{ 529{
438 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 530 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
439 u32 mask = 0; 531 u32 mask = 0;
440 int idx;
441
442 for (idx = 0; idx < 2; ++idx) {
443 if (adev->vce.harvest_config & (1 << idx))
444 continue;
445 532
446 if (idx == 0) 533 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
447 mask |= SRBM_STATUS2__VCE0_BUSY_MASK; 534 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
448 else
449 mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
450 }
451 535
452 return !(RREG32(mmSRBM_STATUS2) & mask); 536 return !(RREG32(mmSRBM_STATUS2) & mask);
453} 537}
@@ -456,23 +540,11 @@ static int vce_v3_0_wait_for_idle(void *handle)
456{ 540{
457 unsigned i; 541 unsigned i;
458 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 542 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
459 u32 mask = 0;
460 int idx;
461
462 for (idx = 0; idx < 2; ++idx) {
463 if (adev->vce.harvest_config & (1 << idx))
464 continue;
465
466 if (idx == 0)
467 mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
468 else
469 mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
470 }
471 543
472 for (i = 0; i < adev->usec_timeout; i++) { 544 for (i = 0; i < adev->usec_timeout; i++)
473 if (!(RREG32(mmSRBM_STATUS2) & mask)) 545 if (vce_v3_0_is_idle(handle))
474 return 0; 546 return 0;
475 } 547
476 return -ETIMEDOUT; 548 return -ETIMEDOUT;
477} 549}
478 550
@@ -480,17 +552,10 @@ static int vce_v3_0_soft_reset(void *handle)
480{ 552{
481 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 553 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
482 u32 mask = 0; 554 u32 mask = 0;
483 int idx;
484 555
485 for (idx = 0; idx < 2; ++idx) { 556 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
486 if (adev->vce.harvest_config & (1 << idx)) 557 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
487 continue;
488 558
489 if (idx == 0)
490 mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
491 else
492 mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
493 }
494 WREG32_P(mmSRBM_SOFT_RESET, mask, 559 WREG32_P(mmSRBM_SOFT_RESET, mask,
495 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK | 560 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
496 SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK)); 561 SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
@@ -592,10 +657,8 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
592 657
593 switch (entry->src_data) { 658 switch (entry->src_data) {
594 case 0: 659 case 0:
595 amdgpu_fence_process(&adev->vce.ring[0]);
596 break;
597 case 1: 660 case 1:
598 amdgpu_fence_process(&adev->vce.ring[1]); 661 amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
599 break; 662 break;
600 default: 663 default:
601 DRM_ERROR("Unhandled interrupt: %d %d\n", 664 DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -609,6 +672,47 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
609static int vce_v3_0_set_clockgating_state(void *handle, 672static int vce_v3_0_set_clockgating_state(void *handle,
610 enum amd_clockgating_state state) 673 enum amd_clockgating_state state)
611{ 674{
675 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
676 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
677 int i;
678
679 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG))
680 return 0;
681
682 mutex_lock(&adev->grbm_idx_mutex);
683 for (i = 0; i < 2; i++) {
684 /* Program VCE Instance 0 or 1 if not harvested */
685 if (adev->vce.harvest_config & (1 << i))
686 continue;
687
688 if (i == 0)
689 WREG32_P(mmGRBM_GFX_INDEX, 0,
690 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
691 else
692 WREG32_P(mmGRBM_GFX_INDEX,
693 GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
694 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
695
696 if (enable) {
697 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
698 uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
699 data &= ~(0xf | 0xff0);
700 data |= ((0x0 << 0) | (0x04 << 4));
701 WREG32(mmVCE_CLOCK_GATING_A, data);
702
703 /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
704 data = RREG32(mmVCE_UENC_CLOCK_GATING);
705 data &= ~(0xf | 0xff0);
706 data |= ((0x0 << 0) | (0x04 << 4));
707 WREG32(mmVCE_UENC_CLOCK_GATING, data);
708 }
709
710 vce_v3_0_set_vce_sw_clock_gating(adev, enable);
711 }
712
713 WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
714 mutex_unlock(&adev->grbm_idx_mutex);
715
612 return 0; 716 return 0;
613} 717}
614 718
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 2adc1c855e85..652e76644c31 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -31,6 +31,7 @@
31#include "amdgpu_vce.h" 31#include "amdgpu_vce.h"
32#include "amdgpu_ucode.h" 32#include "amdgpu_ucode.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h"
34 35
35#include "gmc/gmc_8_1_d.h" 36#include "gmc/gmc_8_1_d.h"
36#include "gmc/gmc_8_1_sh_mask.h" 37#include "gmc/gmc_8_1_sh_mask.h"
@@ -71,6 +72,7 @@
71#include "uvd_v5_0.h" 72#include "uvd_v5_0.h"
72#include "uvd_v6_0.h" 73#include "uvd_v6_0.h"
73#include "vce_v3_0.h" 74#include "vce_v3_0.h"
75#include "amdgpu_powerplay.h"
74 76
75/* 77/*
76 * Indirect registers accessor 78 * Indirect registers accessor
@@ -376,6 +378,38 @@ static bool vi_read_disabled_bios(struct amdgpu_device *adev)
376 WREG32_SMC(ixROM_CNTL, rom_cntl); 378 WREG32_SMC(ixROM_CNTL, rom_cntl);
377 return r; 379 return r;
378} 380}
381
382static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
383 u8 *bios, u32 length_bytes)
384{
385 u32 *dw_ptr;
386 unsigned long flags;
387 u32 i, length_dw;
388
389 if (bios == NULL)
390 return false;
391 if (length_bytes == 0)
392 return false;
393 /* APU vbios image is part of sbios image */
394 if (adev->flags & AMD_IS_APU)
395 return false;
396
397 dw_ptr = (u32 *)bios;
398 length_dw = ALIGN(length_bytes, 4) / 4;
399 /* take the smc lock since we are using the smc index */
400 spin_lock_irqsave(&adev->smc_idx_lock, flags);
401 /* set rom index to 0 */
402 WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
403 WREG32(mmSMC_IND_DATA_0, 0);
404 /* set index to data for continous read */
405 WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
406 for (i = 0; i < length_dw; i++)
407 dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
408 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
409
410 return true;
411}
412
379static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { 413static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
380 {mmGB_MACROTILE_MODE7, true}, 414 {mmGB_MACROTILE_MODE7, true},
381}; 415};
@@ -1019,9 +1053,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
1019 1053
1020static void vi_pcie_gen3_enable(struct amdgpu_device *adev) 1054static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
1021{ 1055{
1022 u32 mask;
1023 int ret;
1024
1025 if (pci_is_root_bus(adev->pdev->bus)) 1056 if (pci_is_root_bus(adev->pdev->bus))
1026 return; 1057 return;
1027 1058
@@ -1031,11 +1062,8 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
1031 if (adev->flags & AMD_IS_APU) 1062 if (adev->flags & AMD_IS_APU)
1032 return; 1063 return;
1033 1064
1034 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1065 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1035 if (ret != 0) 1066 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
1036 return;
1037
1038 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
1039 return; 1067 return;
1040 1068
1041 /* todo */ 1069 /* todo */
@@ -1098,7 +1126,7 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
1098 .major = 7, 1126 .major = 7,
1099 .minor = 1, 1127 .minor = 1,
1100 .rev = 0, 1128 .rev = 0,
1101 .funcs = &iceland_dpm_ip_funcs, 1129 .funcs = &amdgpu_pp_ip_funcs,
1102 }, 1130 },
1103 { 1131 {
1104 .type = AMD_IP_BLOCK_TYPE_GFX, 1132 .type = AMD_IP_BLOCK_TYPE_GFX,
@@ -1145,7 +1173,7 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
1145 .major = 7, 1173 .major = 7,
1146 .minor = 1, 1174 .minor = 1,
1147 .rev = 0, 1175 .rev = 0,
1148 .funcs = &tonga_dpm_ip_funcs, 1176 .funcs = &amdgpu_pp_ip_funcs,
1149 }, 1177 },
1150 { 1178 {
1151 .type = AMD_IP_BLOCK_TYPE_DCE, 1179 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -1213,7 +1241,7 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
1213 .major = 7, 1241 .major = 7,
1214 .minor = 1, 1242 .minor = 1,
1215 .rev = 0, 1243 .rev = 0,
1216 .funcs = &fiji_dpm_ip_funcs, 1244 .funcs = &amdgpu_pp_ip_funcs,
1217 }, 1245 },
1218 { 1246 {
1219 .type = AMD_IP_BLOCK_TYPE_DCE, 1247 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -1281,7 +1309,7 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
1281 .major = 8, 1309 .major = 8,
1282 .minor = 0, 1310 .minor = 0,
1283 .rev = 0, 1311 .rev = 0,
1284 .funcs = &cz_dpm_ip_funcs, 1312 .funcs = &amdgpu_pp_ip_funcs
1285 }, 1313 },
1286 { 1314 {
1287 .type = AMD_IP_BLOCK_TYPE_DCE, 1315 .type = AMD_IP_BLOCK_TYPE_DCE,
@@ -1354,20 +1382,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1354 1382
1355static uint32_t vi_get_rev_id(struct amdgpu_device *adev) 1383static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1356{ 1384{
1357 if (adev->asic_type == CHIP_TOPAZ) 1385 if (adev->flags & AMD_IS_APU)
1358 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1359 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1360 else if (adev->flags & AMD_IS_APU)
1361 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK) 1386 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
1362 >> ATI_REV_ID_FUSE_MACRO__SHIFT; 1387 >> ATI_REV_ID_FUSE_MACRO__SHIFT;
1363 else 1388 else
1364 return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) 1389 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1365 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; 1390 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1366} 1391}
1367 1392
1368static const struct amdgpu_asic_funcs vi_asic_funcs = 1393static const struct amdgpu_asic_funcs vi_asic_funcs =
1369{ 1394{
1370 .read_disabled_bios = &vi_read_disabled_bios, 1395 .read_disabled_bios = &vi_read_disabled_bios,
1396 .read_bios_from_rom = &vi_read_bios_from_rom,
1371 .read_register = &vi_read_register, 1397 .read_register = &vi_read_register,
1372 .reset = &vi_asic_reset, 1398 .reset = &vi_asic_reset,
1373 .set_vga_state = &vi_vga_set_state, 1399 .set_vga_state = &vi_vga_set_state,
@@ -1416,7 +1442,8 @@ static int vi_common_early_init(void *handle)
1416 break; 1442 break;
1417 case CHIP_FIJI: 1443 case CHIP_FIJI:
1418 adev->has_uvd = true; 1444 adev->has_uvd = true;
1419 adev->cg_flags = 0; 1445 adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
1446 AMDGPU_CG_SUPPORT_VCE_MGCG;
1420 adev->pg_flags = 0; 1447 adev->pg_flags = 0;
1421 adev->external_rev_id = adev->rev_id + 0x3c; 1448 adev->external_rev_id = adev->rev_id + 0x3c;
1422 break; 1449 break;
@@ -1442,6 +1469,8 @@ static int vi_common_early_init(void *handle)
1442 if (amdgpu_smc_load_fw && smc_enabled) 1469 if (amdgpu_smc_load_fw && smc_enabled)
1443 adev->firmware.smu_load = true; 1470 adev->firmware.smu_load = true;
1444 1471
1472 amdgpu_get_pcie_info(adev);
1473
1445 return 0; 1474 return 0;
1446} 1475}
1447 1476
@@ -1515,9 +1544,95 @@ static int vi_common_soft_reset(void *handle)
1515 return 0; 1544 return 0;
1516} 1545}
1517 1546
1547static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1548 bool enable)
1549{
1550 uint32_t temp, data;
1551
1552 temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1553
1554 if (enable)
1555 data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1556 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1557 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1558 else
1559 data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1560 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1561 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1562
1563 if (temp != data)
1564 WREG32_PCIE(ixPCIE_CNTL2, data);
1565}
1566
1567static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1568 bool enable)
1569{
1570 uint32_t temp, data;
1571
1572 temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1573
1574 if (enable)
1575 data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1576 else
1577 data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1578
1579 if (temp != data)
1580 WREG32(mmHDP_HOST_PATH_CNTL, data);
1581}
1582
1583static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
1584 bool enable)
1585{
1586 uint32_t temp, data;
1587
1588 temp = data = RREG32(mmHDP_MEM_POWER_LS);
1589
1590 if (enable)
1591 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1592 else
1593 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1594
1595 if (temp != data)
1596 WREG32(mmHDP_MEM_POWER_LS, data);
1597}
1598
1599static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1600 bool enable)
1601{
1602 uint32_t temp, data;
1603
1604 temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1605
1606 if (enable)
1607 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1608 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1609 else
1610 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1611 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1612
1613 if (temp != data)
1614 WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1615}
1616
1518static int vi_common_set_clockgating_state(void *handle, 1617static int vi_common_set_clockgating_state(void *handle,
1519 enum amd_clockgating_state state) 1618 enum amd_clockgating_state state)
1520{ 1619{
1620 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1621
1622 switch (adev->asic_type) {
1623 case CHIP_FIJI:
1624 fiji_update_bif_medium_grain_light_sleep(adev,
1625 state == AMD_CG_STATE_GATE ? true : false);
1626 fiji_update_hdp_medium_grain_clock_gating(adev,
1627 state == AMD_CG_STATE_GATE ? true : false);
1628 fiji_update_hdp_light_sleep(adev,
1629 state == AMD_CG_STATE_GATE ? true : false);
1630 fiji_update_rom_medium_grain_clock_gating(adev,
1631 state == AMD_CG_STATE_GATE ? true : false);
1632 break;
1633 default:
1634 break;
1635 }
1521 return 0; 1636 return 0;
1522} 1637}
1523 1638
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 01a29c3d7011..496360eb3fba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -21,14 +21,63 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef AMDGPU_ACPI_H 24#ifndef AMD_ACPI_H
25#define AMDGPU_ACPI_H 25#define AMD_ACPI_H
26 26
27struct amdgpu_device; 27#define ACPI_AC_CLASS "ac_adapter"
28struct acpi_bus_event;
29 28
30int amdgpu_atif_handler(struct amdgpu_device *adev, 29struct atif_verify_interface {
31 struct acpi_bus_event *event); 30 u16 size; /* structure size in bytes (includes size field) */
31 u16 version; /* version */
32 u32 notification_mask; /* supported notifications mask */
33 u32 function_bits; /* supported functions bit vector */
34} __packed;
35
36struct atif_system_params {
37 u16 size; /* structure size in bytes (includes size field) */
38 u32 valid_mask; /* valid flags mask */
39 u32 flags; /* flags */
40 u8 command_code; /* notify command code */
41} __packed;
42
43struct atif_sbios_requests {
44 u16 size; /* structure size in bytes (includes size field) */
45 u32 pending; /* pending sbios requests */
46 u8 panel_exp_mode; /* panel expansion mode */
47 u8 thermal_gfx; /* thermal state: target gfx controller */
48 u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
49 u8 forced_power_gfx; /* forced power state: target gfx controller */
50 u8 forced_power_state; /* forced power state: state id */
51 u8 system_power_src; /* system power source */
52 u8 backlight_level; /* panel backlight level (0-255) */
53} __packed;
54
55#define ATIF_NOTIFY_MASK 0x3
56#define ATIF_NOTIFY_NONE 0
57#define ATIF_NOTIFY_81 1
58#define ATIF_NOTIFY_N 2
59
60struct atcs_verify_interface {
61 u16 size; /* structure size in bytes (includes size field) */
62 u16 version; /* version */
63 u32 function_bits; /* supported functions bit vector */
64} __packed;
65
66#define ATCS_VALID_FLAGS_MASK 0x3
67
68struct atcs_pref_req_input {
69 u16 size; /* structure size in bytes (includes size field) */
70 u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
71 u16 valid_flags_mask; /* valid flags mask */
72 u16 flags; /* flags */
73 u8 req_type; /* request type */
74 u8 perf_req; /* performance request */
75} __packed;
76
77struct atcs_pref_req_output {
78 u16 size; /* structure size in bytes (includes size field) */
79 u8 ret_val; /* return value */
80} __packed;
32 81
33/* AMD hw uses four ACPI control methods: 82/* AMD hw uses four ACPI control methods:
34 * 1. ATIF 83 * 1. ATIF
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
new file mode 100644
index 000000000000..7c2a916c1e63
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __AMD_PCIE_H__
24#define __AMD_PCIE_H__
25
26/* Following flags shows PCIe link speed supported in driver which are decided by chipset and ASIC */
27#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00010000
28#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
29#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
30#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
31#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
32
33/* Following flags shows PCIe link speed supported by ASIC H/W.*/
34#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00000001
35#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
36#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
37#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
38#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
39
40/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
41#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
42#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
43#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 0x00040000
44#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 0x00080000
45#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 0x00100000
46#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 0x00200000
47#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
48#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
49
50#endif
diff --git a/drivers/gpu/drm/amd/include/amd_pcie_helpers.h b/drivers/gpu/drm/amd/include/amd_pcie_helpers.h
new file mode 100644
index 000000000000..5725bf85eacc
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/amd_pcie_helpers.h
@@ -0,0 +1,141 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __AMD_PCIE_HELPERS_H__
24#define __AMD_PCIE_HELPERS_H__
25
26#include "amd_pcie.h"
27
28static inline bool is_pcie_gen3_supported(uint32_t pcie_link_speed_cap)
29{
30 if (pcie_link_speed_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
31 return true;
32
33 return false;
34}
35
36static inline bool is_pcie_gen2_supported(uint32_t pcie_link_speed_cap)
37{
38 if (pcie_link_speed_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
39 return true;
40
41 return false;
42}
43
44/* Get the new PCIE speed given the ASIC PCIE Cap and the NewState's requested PCIE speed*/
45static inline uint16_t get_pcie_gen_support(uint32_t pcie_link_speed_cap,
46 uint16_t ns_pcie_gen)
47{
48 uint32_t asic_pcie_link_speed_cap = (pcie_link_speed_cap &
49 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK);
50 uint32_t sys_pcie_link_speed_cap = (pcie_link_speed_cap &
51 CAIL_PCIE_LINK_SPEED_SUPPORT_MASK);
52
53 switch (asic_pcie_link_speed_cap) {
54 case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1:
55 return PP_PCIEGen1;
56
57 case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2:
58 return PP_PCIEGen2;
59
60 case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3:
61 return PP_PCIEGen3;
62
63 default:
64 if (is_pcie_gen3_supported(sys_pcie_link_speed_cap) &&
65 (ns_pcie_gen == PP_PCIEGen3)) {
66 return PP_PCIEGen3;
67 } else if (is_pcie_gen2_supported(sys_pcie_link_speed_cap) &&
68 ((ns_pcie_gen == PP_PCIEGen3) || (ns_pcie_gen == PP_PCIEGen2))) {
69 return PP_PCIEGen2;
70 }
71 }
72
73 return PP_PCIEGen1;
74}
75
76static inline uint16_t get_pcie_lane_support(uint32_t pcie_lane_width_cap,
77 uint16_t ns_pcie_lanes)
78{
79 int i, j;
80 uint16_t new_pcie_lanes = ns_pcie_lanes;
81 uint16_t pcie_lanes[7] = {1, 2, 4, 8, 12, 16, 32};
82
83 switch (pcie_lane_width_cap) {
84 case 0:
85 printk(KERN_ERR "No valid PCIE lane width reported");
86 break;
87 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
88 new_pcie_lanes = 1;
89 break;
90 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
91 new_pcie_lanes = 2;
92 break;
93 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
94 new_pcie_lanes = 4;
95 break;
96 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
97 new_pcie_lanes = 8;
98 break;
99 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
100 new_pcie_lanes = 12;
101 break;
102 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
103 new_pcie_lanes = 16;
104 break;
105 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
106 new_pcie_lanes = 32;
107 break;
108 default:
109 for (i = 0; i < 7; i++) {
110 if (ns_pcie_lanes == pcie_lanes[i]) {
111 if (pcie_lane_width_cap & (0x10000 << i)) {
112 break;
113 } else {
114 for (j = i - 1; j >= 0; j--) {
115 if (pcie_lane_width_cap & (0x10000 << j)) {
116 new_pcie_lanes = pcie_lanes[j];
117 break;
118 }
119 }
120
121 if (j < 0) {
122 for (j = i + 1; j < 7; j++) {
123 if (pcie_lane_width_cap & (0x10000 << j)) {
124 new_pcie_lanes = pcie_lanes[j];
125 break;
126 }
127 }
128 if (j > 7)
129 printk(KERN_ERR "Cannot find a valid PCIE lane width!");
130 }
131 }
132 break;
133 }
134 }
135 break;
136 }
137
138 return new_pcie_lanes;
139}
140
141#endif
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index fe28fb353fab..1195d06f55bc 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -85,6 +85,27 @@ enum amd_powergating_state {
85 AMD_PG_STATE_UNGATE, 85 AMD_PG_STATE_UNGATE,
86}; 86};
87 87
88enum amd_pm_state_type {
89 /* not used for dpm */
90 POWER_STATE_TYPE_DEFAULT,
91 POWER_STATE_TYPE_POWERSAVE,
92 /* user selectable states */
93 POWER_STATE_TYPE_BATTERY,
94 POWER_STATE_TYPE_BALANCED,
95 POWER_STATE_TYPE_PERFORMANCE,
96 /* internal states */
97 POWER_STATE_TYPE_INTERNAL_UVD,
98 POWER_STATE_TYPE_INTERNAL_UVD_SD,
99 POWER_STATE_TYPE_INTERNAL_UVD_HD,
100 POWER_STATE_TYPE_INTERNAL_UVD_HD2,
101 POWER_STATE_TYPE_INTERNAL_UVD_MVC,
102 POWER_STATE_TYPE_INTERNAL_BOOT,
103 POWER_STATE_TYPE_INTERNAL_THERMAL,
104 POWER_STATE_TYPE_INTERNAL_ACPI,
105 POWER_STATE_TYPE_INTERNAL_ULV,
106 POWER_STATE_TYPE_INTERNAL_3DPERF,
107};
108
88struct amd_ip_funcs { 109struct amd_ip_funcs {
89 /* sets up early driver state (pre sw_init), does not configure hw - Optional */ 110 /* sets up early driver state (pre sw_init), does not configure hw - Optional */
90 int (*early_init)(void *handle); 111 int (*early_init)(void *handle);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 92b6ba0047af..293329719bba 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -596,6 +596,7 @@
596#define mmSWRST_EP_CONTROL_0 0x14ac 596#define mmSWRST_EP_CONTROL_0 0x14ac
597#define mmCPM_CONTROL 0x14b8 597#define mmCPM_CONTROL 0x14b8
598#define mmGSKT_CONTROL 0x14bf 598#define mmGSKT_CONTROL 0x14bf
599#define ixSWRST_COMMAND_1 0x1400103
599#define ixLM_CONTROL 0x1400120 600#define ixLM_CONTROL 0x1400120
600#define ixLM_PCIETXMUX0 0x1400121 601#define ixLM_PCIETXMUX0 0x1400121
601#define ixLM_PCIETXMUX1 0x1400122 602#define ixLM_PCIETXMUX1 0x1400122
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
index daf763ba1a8f..a9b6923192ee 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
@@ -2807,5 +2807,18 @@
2807#define ixDIDT_DBR_WEIGHT0_3 0x90 2807#define ixDIDT_DBR_WEIGHT0_3 0x90
2808#define ixDIDT_DBR_WEIGHT4_7 0x91 2808#define ixDIDT_DBR_WEIGHT4_7 0x91
2809#define ixDIDT_DBR_WEIGHT8_11 0x92 2809#define ixDIDT_DBR_WEIGHT8_11 0x92
2810#define mmTD_EDC_CNT 0x252e
2811#define mmCPF_EDC_TAG_CNT 0x3188
2812#define mmCPF_EDC_ROQ_CNT 0x3189
2813#define mmCPF_EDC_ATC_CNT 0x318a
2814#define mmCPG_EDC_TAG_CNT 0x318b
2815#define mmCPG_EDC_ATC_CNT 0x318c
2816#define mmCPG_EDC_DMA_CNT 0x318d
2817#define mmCPC_EDC_SCRATCH_CNT 0x318e
2818#define mmCPC_EDC_UCODE_CNT 0x318f
2819#define mmCPC_EDC_ATC_CNT 0x3190
2820#define mmDC_EDC_STATE_CNT 0x3191
2821#define mmDC_EDC_CSINVOC_CNT 0x3192
2822#define mmDC_EDC_RESTORE_CNT 0x3193
2810 2823
2811#endif /* GFX_8_0_D_H */ 2824#endif /* GFX_8_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 552622675ace..eaf451e26643 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -550,6 +550,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
550//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL 550//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
551#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04 551#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
552 552
553// use for ComputeMemoryClockParamTable
554typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
555{
556 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock;
557 ULONG ulReserved;
558}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
559
553typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER 560typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
554{ 561{
555 ATOM_COMPUTE_CLOCK_FREQ ulClock; 562 ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -4988,6 +4995,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_3
4988 ULONG ulSDCMargine; 4995 ULONG ulSDCMargine;
4989}ATOM_ASIC_PROFILING_INFO_V3_3; 4996}ATOM_ASIC_PROFILING_INFO_V3_3;
4990 4997
4998// for Fiji speed EVV algorithm
4999typedef struct _ATOM_ASIC_PROFILING_INFO_V3_4
5000{
5001 ATOM_COMMON_TABLE_HEADER asHeader;
5002 ULONG ulEvvLkgFactor;
5003 ULONG ulBoardCoreTemp;
5004 ULONG ulMaxVddc;
5005 ULONG ulMinVddc;
5006 ULONG ulLoadLineSlop;
5007 ULONG ulLeakageTemp;
5008 ULONG ulLeakageVoltage;
5009 EFUSE_LINEAR_FUNC_PARAM sCACm;
5010 EFUSE_LINEAR_FUNC_PARAM sCACb;
5011 EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
5012 EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
5013 EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
5014 USHORT usLkgEuseIndex;
5015 UCHAR ucLkgEfuseBitLSB;
5016 UCHAR ucLkgEfuseLength;
5017 ULONG ulLkgEncodeLn_MaxDivMin;
5018 ULONG ulLkgEncodeMax;
5019 ULONG ulLkgEncodeMin;
5020 ULONG ulEfuseLogisticAlpha;
5021 USHORT usPowerDpm0;
5022 USHORT usPowerDpm1;
5023 USHORT usPowerDpm2;
5024 USHORT usPowerDpm3;
5025 USHORT usPowerDpm4;
5026 USHORT usPowerDpm5;
5027 USHORT usPowerDpm6;
5028 USHORT usPowerDpm7;
5029 ULONG ulTdpDerateDPM0;
5030 ULONG ulTdpDerateDPM1;
5031 ULONG ulTdpDerateDPM2;
5032 ULONG ulTdpDerateDPM3;
5033 ULONG ulTdpDerateDPM4;
5034 ULONG ulTdpDerateDPM5;
5035 ULONG ulTdpDerateDPM6;
5036 ULONG ulTdpDerateDPM7;
5037 EFUSE_LINEAR_FUNC_PARAM sRoFuse;
5038 ULONG ulEvvDefaultVddc;
5039 ULONG ulEvvNoCalcVddc;
5040 USHORT usParamNegFlag;
5041 USHORT usSpeed_Model;
5042 ULONG ulSM_A0;
5043 ULONG ulSM_A1;
5044 ULONG ulSM_A2;
5045 ULONG ulSM_A3;
5046 ULONG ulSM_A4;
5047 ULONG ulSM_A5;
5048 ULONG ulSM_A6;
5049 ULONG ulSM_A7;
5050 UCHAR ucSM_A0_sign;
5051 UCHAR ucSM_A1_sign;
5052 UCHAR ucSM_A2_sign;
5053 UCHAR ucSM_A3_sign;
5054 UCHAR ucSM_A4_sign;
5055 UCHAR ucSM_A5_sign;
5056 UCHAR ucSM_A6_sign;
5057 UCHAR ucSM_A7_sign;
5058 ULONG ulMargin_RO_a;
5059 ULONG ulMargin_RO_b;
5060 ULONG ulMargin_RO_c;
5061 ULONG ulMargin_fixed;
5062 ULONG ulMargin_Fmax_mean;
5063 ULONG ulMargin_plat_mean;
5064 ULONG ulMargin_Fmax_sigma;
5065 ULONG ulMargin_plat_sigma;
5066 ULONG ulMargin_DC_sigma;
5067 ULONG ulReserved[8]; // Reserved for future ASIC
5068}ATOM_ASIC_PROFILING_INFO_V3_4;
5069
4991typedef struct _ATOM_POWER_SOURCE_OBJECT 5070typedef struct _ATOM_POWER_SOURCE_OBJECT
4992{ 5071{
4993 UCHAR ucPwrSrcId; // Power source 5072 UCHAR ucPwrSrcId; // Power source
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 992dcd8a5c6a..713aec954692 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -105,6 +105,34 @@ enum cgs_ucode_id {
105 CGS_UCODE_ID_MAXIMUM, 105 CGS_UCODE_ID_MAXIMUM,
106}; 106};
107 107
108enum cgs_system_info_id {
109 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
110 CGS_SYSTEM_INFO_PCIE_GEN_INFO,
111 CGS_SYSTEM_INFO_PCIE_MLW,
112 CGS_SYSTEM_INFO_ID_MAXIMUM,
113};
114
115struct cgs_system_info {
116 uint64_t size;
117 uint64_t info_id;
118 union {
119 void *ptr;
120 uint64_t value;
121 };
122 uint64_t padding[13];
123};
124
125/*
126 * enum cgs_resource_type - GPU resource type
127 */
128enum cgs_resource_type {
129 CGS_RESOURCE_TYPE_MMIO = 0,
130 CGS_RESOURCE_TYPE_FB,
131 CGS_RESOURCE_TYPE_IO,
132 CGS_RESOURCE_TYPE_DOORBELL,
133 CGS_RESOURCE_TYPE_ROM,
134};
135
108/** 136/**
109 * struct cgs_clock_limits - Clock limits 137 * struct cgs_clock_limits - Clock limits
110 * 138 *
@@ -127,8 +155,53 @@ struct cgs_firmware_info {
127 void *kptr; 155 void *kptr;
128}; 156};
129 157
158struct cgs_mode_info {
159 uint32_t refresh_rate;
160 uint32_t ref_clock;
161 uint32_t vblank_time_us;
162};
163
164struct cgs_display_info {
165 uint32_t display_count;
166 uint32_t active_display_mask;
167 struct cgs_mode_info *mode_info;
168};
169
130typedef unsigned long cgs_handle_t; 170typedef unsigned long cgs_handle_t;
131 171
172#define CGS_ACPI_METHOD_ATCS 0x53435441
173#define CGS_ACPI_METHOD_ATIF 0x46495441
174#define CGS_ACPI_METHOD_ATPX 0x58505441
175#define CGS_ACPI_FIELD_METHOD_NAME 0x00000001
176#define CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT 0x00000002
177#define CGS_ACPI_MAX_BUFFER_SIZE 256
178#define CGS_ACPI_TYPE_ANY 0x00
179#define CGS_ACPI_TYPE_INTEGER 0x01
180#define CGS_ACPI_TYPE_STRING 0x02
181#define CGS_ACPI_TYPE_BUFFER 0x03
182#define CGS_ACPI_TYPE_PACKAGE 0x04
183
184struct cgs_acpi_method_argument {
185 uint32_t type;
186 uint32_t method_length;
187 uint32_t data_length;
188 union{
189 uint32_t value;
190 void *pointer;
191 };
192};
193
194struct cgs_acpi_method_info {
195 uint32_t size;
196 uint32_t field;
197 uint32_t input_count;
198 uint32_t name;
199 struct cgs_acpi_method_argument *pinput_argument;
200 uint32_t output_count;
201 struct cgs_acpi_method_argument *poutput_argument;
202 uint32_t padding[9];
203};
204
132/** 205/**
133 * cgs_gpu_mem_info() - Return information about memory heaps 206 * cgs_gpu_mem_info() - Return information about memory heaps
134 * @cgs_device: opaque device handle 207 * @cgs_device: opaque device handle
@@ -355,6 +428,23 @@ typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
355typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr, 428typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
356 uint32_t value); 429 uint32_t value);
357 430
431
432/**
433 * cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
434 * @cgs_device: opaque device handle
435 * @resource_type: Type of Resource (MMIO, IO, ROM, FB, DOORBELL)
436 * @size: size of the region
437 * @offset: offset from the start of the region
438 * @resource_base: base address (not including offset) returned
439 *
440 * Return: 0 on success, -errno otherwise
441 */
442typedef int (*cgs_get_pci_resource_t)(void *cgs_device,
443 enum cgs_resource_type resource_type,
444 uint64_t size,
445 uint64_t offset,
446 uint64_t *resource_base);
447
358/** 448/**
359 * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table 449 * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
360 * @cgs_device: opaque device handle 450 * @cgs_device: opaque device handle
@@ -493,6 +583,21 @@ typedef int(*cgs_set_clockgating_state)(void *cgs_device,
493 enum amd_ip_block_type block_type, 583 enum amd_ip_block_type block_type,
494 enum amd_clockgating_state state); 584 enum amd_clockgating_state state);
495 585
586typedef int(*cgs_get_active_displays_info)(
587 void *cgs_device,
588 struct cgs_display_info *info);
589
590typedef int (*cgs_call_acpi_method)(void *cgs_device,
591 uint32_t acpi_method,
592 uint32_t acpi_function,
593 void *pinput, void *poutput,
594 uint32_t output_count,
595 uint32_t input_size,
596 uint32_t output_size);
597
598typedef int (*cgs_query_system_info)(void *cgs_device,
599 struct cgs_system_info *sys_info);
600
496struct cgs_ops { 601struct cgs_ops {
497 /* memory management calls (similar to KFD interface) */ 602 /* memory management calls (similar to KFD interface) */
498 cgs_gpu_mem_info_t gpu_mem_info; 603 cgs_gpu_mem_info_t gpu_mem_info;
@@ -516,6 +621,8 @@ struct cgs_ops {
516 cgs_write_pci_config_byte_t write_pci_config_byte; 621 cgs_write_pci_config_byte_t write_pci_config_byte;
517 cgs_write_pci_config_word_t write_pci_config_word; 622 cgs_write_pci_config_word_t write_pci_config_word;
518 cgs_write_pci_config_dword_t write_pci_config_dword; 623 cgs_write_pci_config_dword_t write_pci_config_dword;
624 /* PCI resources */
625 cgs_get_pci_resource_t get_pci_resource;
519 /* ATOM BIOS */ 626 /* ATOM BIOS */
520 cgs_atom_get_data_table_t atom_get_data_table; 627 cgs_atom_get_data_table_t atom_get_data_table;
521 cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs; 628 cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
@@ -533,7 +640,12 @@ struct cgs_ops {
533 /* cg pg interface*/ 640 /* cg pg interface*/
534 cgs_set_powergating_state set_powergating_state; 641 cgs_set_powergating_state set_powergating_state;
535 cgs_set_clockgating_state set_clockgating_state; 642 cgs_set_clockgating_state set_clockgating_state;
536 /* ACPI (TODO) */ 643 /* display manager */
644 cgs_get_active_displays_info get_active_displays_info;
645 /* ACPI */
646 cgs_call_acpi_method call_acpi_method;
647 /* get system info */
648 cgs_query_system_info query_system_info;
537}; 649};
538 650
539struct cgs_os_ops; /* To be define in OS-specific CGS header */ 651struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -620,5 +732,15 @@ struct cgs_device
620 CGS_CALL(set_powergating_state, dev, block_type, state) 732 CGS_CALL(set_powergating_state, dev, block_type, state)
621#define cgs_set_clockgating_state(dev, block_type, state) \ 733#define cgs_set_clockgating_state(dev, block_type, state) \
622 CGS_CALL(set_clockgating_state, dev, block_type, state) 734 CGS_CALL(set_clockgating_state, dev, block_type, state)
735#define cgs_get_active_displays_info(dev, info) \
736 CGS_CALL(get_active_displays_info, dev, info)
737#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
738 CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
739#define cgs_query_system_info(dev, sys_info) \
740 CGS_CALL(query_system_info, dev, sys_info)
741#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \
742 resource_base) \
743 CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
744 resource_base)
623 745
624#endif /* _CGS_COMMON_H */ 746#endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/powerplay/Kconfig b/drivers/gpu/drm/amd/powerplay/Kconfig
new file mode 100644
index 000000000000..af380335b425
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/Kconfig
@@ -0,0 +1,6 @@
1config DRM_AMD_POWERPLAY
2 bool "Enable AMD powerplay component"
3 depends on DRM_AMDGPU
4 default n
5 help
6 select this option will enable AMD powerplay component.
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
new file mode 100644
index 000000000000..e195bf59da86
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -0,0 +1,22 @@
1
2subdir-ccflags-y += -Iinclude/drm \
3 -Idrivers/gpu/drm/amd/powerplay/inc/ \
4 -Idrivers/gpu/drm/amd/include/asic_reg \
5 -Idrivers/gpu/drm/amd/include \
6 -Idrivers/gpu/drm/amd/powerplay/smumgr\
7 -Idrivers/gpu/drm/amd/powerplay/hwmgr \
8 -Idrivers/gpu/drm/amd/powerplay/eventmgr
9
10AMD_PP_PATH = ../powerplay
11
12PP_LIBS = smumgr hwmgr eventmgr
13
14AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
15
16include $(AMD_POWERPLAY)
17
18POWER_MGR = amd_powerplay.o
19
20AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
21
22AMD_POWERPLAY_FILES += $(AMD_PP_POWER)
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
new file mode 100644
index 000000000000..8f5d5edcf193
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -0,0 +1,660 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/gfp.h>
26#include <linux/slab.h>
27#include "amd_shared.h"
28#include "amd_powerplay.h"
29#include "pp_instance.h"
30#include "power_state.h"
31#include "eventmanager.h"
32
/*
 * Validate a powerplay handle: it must be non-NULL and carry the
 * PP_VALID magic written by amd_pp_instance_init().  Expands to an
 * early "return -EINVAL" in the *calling* function on failure.
 */
#define PP_CHECK(handle) \
	do { \
		if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \
			return -EINVAL; \
	} while (0)
38
/* amd_ip_funcs stub: powerplay has no early-init work. */
static int pp_early_init(void *handle)
{
	return 0;
}
43
44static int pp_sw_init(void *handle)
45{
46 struct pp_instance *pp_handle;
47 struct pp_hwmgr *hwmgr;
48 int ret = 0;
49
50 if (handle == NULL)
51 return -EINVAL;
52
53 pp_handle = (struct pp_instance *)handle;
54 hwmgr = pp_handle->hwmgr;
55
56 if (hwmgr == NULL || hwmgr->pptable_func == NULL ||
57 hwmgr->hwmgr_func == NULL ||
58 hwmgr->pptable_func->pptable_init == NULL ||
59 hwmgr->hwmgr_func->backend_init == NULL)
60 return -EINVAL;
61
62 ret = hwmgr->pptable_func->pptable_init(hwmgr);
63
64 if (ret == 0)
65 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
66
67 return ret;
68}
69
70static int pp_sw_fini(void *handle)
71{
72 struct pp_instance *pp_handle;
73 struct pp_hwmgr *hwmgr;
74 int ret = 0;
75
76 if (handle == NULL)
77 return -EINVAL;
78
79 pp_handle = (struct pp_instance *)handle;
80 hwmgr = pp_handle->hwmgr;
81
82 if (hwmgr != NULL || hwmgr->hwmgr_func != NULL ||
83 hwmgr->hwmgr_func->backend_fini != NULL)
84 ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
85
86 return ret;
87}
88
89static int pp_hw_init(void *handle)
90{
91 struct pp_instance *pp_handle;
92 struct pp_smumgr *smumgr;
93 struct pp_eventmgr *eventmgr;
94 int ret = 0;
95
96 if (handle == NULL)
97 return -EINVAL;
98
99 pp_handle = (struct pp_instance *)handle;
100 smumgr = pp_handle->smu_mgr;
101
102 if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
103 smumgr->smumgr_funcs->smu_init == NULL ||
104 smumgr->smumgr_funcs->start_smu == NULL)
105 return -EINVAL;
106
107 ret = smumgr->smumgr_funcs->smu_init(smumgr);
108 if (ret) {
109 printk(KERN_ERR "[ powerplay ] smc initialization failed\n");
110 return ret;
111 }
112
113 ret = smumgr->smumgr_funcs->start_smu(smumgr);
114 if (ret) {
115 printk(KERN_ERR "[ powerplay ] smc start failed\n");
116 smumgr->smumgr_funcs->smu_fini(smumgr);
117 return ret;
118 }
119
120 hw_init_power_state_table(pp_handle->hwmgr);
121 eventmgr = pp_handle->eventmgr;
122
123 if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
124 return -EINVAL;
125
126 ret = eventmgr->pp_eventmgr_init(eventmgr);
127 return 0;
128}
129
130static int pp_hw_fini(void *handle)
131{
132 struct pp_instance *pp_handle;
133 struct pp_smumgr *smumgr;
134 struct pp_eventmgr *eventmgr;
135
136 if (handle == NULL)
137 return -EINVAL;
138
139 pp_handle = (struct pp_instance *)handle;
140 eventmgr = pp_handle->eventmgr;
141
142 if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL)
143 eventmgr->pp_eventmgr_fini(eventmgr);
144
145 smumgr = pp_handle->smu_mgr;
146
147 if (smumgr != NULL || smumgr->smumgr_funcs != NULL ||
148 smumgr->smumgr_funcs->smu_fini != NULL)
149 smumgr->smumgr_funcs->smu_fini(smumgr);
150
151 return 0;
152}
153
154static bool pp_is_idle(void *handle)
155{
156 return 0;
157}
158
159static int pp_wait_for_idle(void *handle)
160{
161 return 0;
162}
163
164static int pp_sw_reset(void *handle)
165{
166 return 0;
167}
168
169static void pp_print_status(void *handle)
170{
171
172}
173
174static int pp_set_clockgating_state(void *handle,
175 enum amd_clockgating_state state)
176{
177 return 0;
178}
179
180static int pp_set_powergating_state(void *handle,
181 enum amd_powergating_state state)
182{
183 return 0;
184}
185
186static int pp_suspend(void *handle)
187{
188 struct pp_instance *pp_handle;
189 struct pp_eventmgr *eventmgr;
190 struct pem_event_data event_data = { {0} };
191
192 if (handle == NULL)
193 return -EINVAL;
194
195 pp_handle = (struct pp_instance *)handle;
196 eventmgr = pp_handle->eventmgr;
197 pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
198 return 0;
199}
200
201static int pp_resume(void *handle)
202{
203 struct pp_instance *pp_handle;
204 struct pp_eventmgr *eventmgr;
205 struct pem_event_data event_data = { {0} };
206 struct pp_smumgr *smumgr;
207 int ret;
208
209 if (handle == NULL)
210 return -EINVAL;
211
212 pp_handle = (struct pp_instance *)handle;
213 smumgr = pp_handle->smu_mgr;
214
215 if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
216 smumgr->smumgr_funcs->start_smu == NULL)
217 return -EINVAL;
218
219 ret = smumgr->smumgr_funcs->start_smu(smumgr);
220 if (ret) {
221 printk(KERN_ERR "[ powerplay ] smc start failed\n");
222 smumgr->smumgr_funcs->smu_fini(smumgr);
223 return ret;
224 }
225
226 eventmgr = pp_handle->eventmgr;
227 pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
228
229 return 0;
230}
231
/*
 * amd_ip_funcs vtable registered with the amdgpu IP-block framework for
 * the powerplay component.  Entries map the IP-block lifecycle to the
 * pp_* handlers above; late_init is intentionally unused.
 */
const struct amd_ip_funcs pp_ip_funcs = {
	.early_init = pp_early_init,
	.late_init = NULL,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.print_status = pp_print_status,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
248
/* DPM firmware hooks: no separate firmware load step is needed here
 * (the SMU firmware is handled during pp_hw_init), so both are stubs. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
258
259static int pp_dpm_force_performance_level(void *handle,
260 enum amd_dpm_forced_level level)
261{
262 struct pp_instance *pp_handle;
263 struct pp_hwmgr *hwmgr;
264
265 if (handle == NULL)
266 return -EINVAL;
267
268 pp_handle = (struct pp_instance *)handle;
269
270 hwmgr = pp_handle->hwmgr;
271
272 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
273 hwmgr->hwmgr_func->force_dpm_level == NULL)
274 return -EINVAL;
275
276 hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
277
278 return 0;
279}
280
281static enum amd_dpm_forced_level pp_dpm_get_performance_level(
282 void *handle)
283{
284 struct pp_hwmgr *hwmgr;
285
286 if (handle == NULL)
287 return -EINVAL;
288
289 hwmgr = ((struct pp_instance *)handle)->hwmgr;
290
291 if (hwmgr == NULL)
292 return -EINVAL;
293
294 return (((struct pp_instance *)handle)->hwmgr->dpm_level);
295}
296
297static int pp_dpm_get_sclk(void *handle, bool low)
298{
299 struct pp_hwmgr *hwmgr;
300
301 if (handle == NULL)
302 return -EINVAL;
303
304 hwmgr = ((struct pp_instance *)handle)->hwmgr;
305
306 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
307 hwmgr->hwmgr_func->get_sclk == NULL)
308 return -EINVAL;
309
310 return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
311}
312
313static int pp_dpm_get_mclk(void *handle, bool low)
314{
315 struct pp_hwmgr *hwmgr;
316
317 if (handle == NULL)
318 return -EINVAL;
319
320 hwmgr = ((struct pp_instance *)handle)->hwmgr;
321
322 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
323 hwmgr->hwmgr_func->get_mclk == NULL)
324 return -EINVAL;
325
326 return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
327}
328
329static int pp_dpm_powergate_vce(void *handle, bool gate)
330{
331 struct pp_hwmgr *hwmgr;
332
333 if (handle == NULL)
334 return -EINVAL;
335
336 hwmgr = ((struct pp_instance *)handle)->hwmgr;
337
338 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
339 hwmgr->hwmgr_func->powergate_vce == NULL)
340 return -EINVAL;
341
342 return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
343}
344
345static int pp_dpm_powergate_uvd(void *handle, bool gate)
346{
347 struct pp_hwmgr *hwmgr;
348
349 if (handle == NULL)
350 return -EINVAL;
351
352 hwmgr = ((struct pp_instance *)handle)->hwmgr;
353
354 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
355 hwmgr->hwmgr_func->powergate_uvd == NULL)
356 return -EINVAL;
357
358 return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
359}
360
361static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
362{
363 switch (state) {
364 case POWER_STATE_TYPE_BATTERY:
365 return PP_StateUILabel_Battery;
366 case POWER_STATE_TYPE_BALANCED:
367 return PP_StateUILabel_Balanced;
368 case POWER_STATE_TYPE_PERFORMANCE:
369 return PP_StateUILabel_Performance;
370 default:
371 return PP_StateUILabel_None;
372 }
373}
374
/*
 * Forward a power-management event from amdgpu into the powerplay event
 * manager.
 *
 * @handle:   powerplay instance (struct pp_instance *)
 * @event_id: which AMD_PP_EVENT_* to dispatch
 * @input:    event payload; for ENABLE_USER_STATE it must point to the
 *            requested power state
 * @output:   unused by the currently handled events
 *
 * Returns -EINVAL on bad arguments, the event manager's status for
 * handled events, and 0 for event ids that are silently ignored.
 */
int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
{
	int ret = 0;
	struct pp_instance *pp_handle;
	struct pem_event_data data = { {0} };

	pp_handle = (struct pp_instance *)handle;

	if (pp_handle == NULL)
		return -EINVAL;

	switch (event_id) {
	case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
		break;
	case AMD_PP_EVENT_ENABLE_USER_STATE:
	{
		enum amd_pm_state_type ps;

		if (input == NULL)
			return -EINVAL;
		/* NOTE(review): payload is read as unsigned long, not as
		 * the enum type — confirm callers pass that width. */
		ps = *(unsigned long *)input;

		data.requested_ui_label = power_state_convert(ps);
		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
	}
	break;
	default:
		break;
	}
	return ret;
}
407
408enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
409{
410 struct pp_hwmgr *hwmgr;
411 struct pp_power_state *state;
412
413 if (handle == NULL)
414 return -EINVAL;
415
416 hwmgr = ((struct pp_instance *)handle)->hwmgr;
417
418 if (hwmgr == NULL || hwmgr->current_ps == NULL)
419 return -EINVAL;
420
421 state = hwmgr->current_ps;
422
423 switch (state->classification.ui_label) {
424 case PP_StateUILabel_Battery:
425 return POWER_STATE_TYPE_BATTERY;
426 case PP_StateUILabel_Balanced:
427 return POWER_STATE_TYPE_BALANCED;
428 case PP_StateUILabel_Performance:
429 return POWER_STATE_TYPE_PERFORMANCE;
430 default:
431 return POWER_STATE_TYPE_DEFAULT;
432 }
433}
434
/*
 * debugfs hook: print the current performance level into the given
 * seq_file via the hwmgr backend.  Silently no-ops when the handle
 * chain or the backend callback is missing.
 */
static void
pp_debugfs_print_current_performance_level(void *handle,
		struct seq_file *m)
{
	struct pp_hwmgr *hwmgr;

	if (handle == NULL)
		return;

	hwmgr = ((struct pp_instance *)handle)->hwmgr;

	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
	    hwmgr->hwmgr_func->print_current_perforce_level == NULL)
		return;

	/* "perforce" is a typo in the hwmgr_func interface declared
	 * elsewhere; the member name must be kept to match it. */
	hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
}
452
453static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
454{
455 struct pp_hwmgr *hwmgr;
456
457 if (handle == NULL)
458 return -EINVAL;
459
460 hwmgr = ((struct pp_instance *)handle)->hwmgr;
461
462 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
463 hwmgr->hwmgr_func->set_fan_control_mode == NULL)
464 return -EINVAL;
465
466 return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
467}
468
469static int pp_dpm_get_fan_control_mode(void *handle)
470{
471 struct pp_hwmgr *hwmgr;
472
473 if (handle == NULL)
474 return -EINVAL;
475
476 hwmgr = ((struct pp_instance *)handle)->hwmgr;
477
478 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
479 hwmgr->hwmgr_func->get_fan_control_mode == NULL)
480 return -EINVAL;
481
482 return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
483}
484
485static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
486{
487 struct pp_hwmgr *hwmgr;
488
489 if (handle == NULL)
490 return -EINVAL;
491
492 hwmgr = ((struct pp_instance *)handle)->hwmgr;
493
494 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
495 hwmgr->hwmgr_func->set_fan_speed_percent == NULL)
496 return -EINVAL;
497
498 return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
499}
500
501static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
502{
503 struct pp_hwmgr *hwmgr;
504
505 if (handle == NULL)
506 return -EINVAL;
507
508 hwmgr = ((struct pp_instance *)handle)->hwmgr;
509
510 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
511 hwmgr->hwmgr_func->get_fan_speed_percent == NULL)
512 return -EINVAL;
513
514 return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
515}
516
517static int pp_dpm_get_temperature(void *handle)
518{
519 struct pp_hwmgr *hwmgr;
520
521 if (handle == NULL)
522 return -EINVAL;
523
524 hwmgr = ((struct pp_instance *)handle)->hwmgr;
525
526 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
527 hwmgr->hwmgr_func->get_temperature == NULL)
528 return -EINVAL;
529
530 return hwmgr->hwmgr_func->get_temperature(hwmgr);
531}
532
/*
 * Public powerplay dispatch table handed to amdgpu via
 * amd_powerplay_init(); every entry forwards into the pp_dpm_* wrappers
 * above, which in turn dispatch into the hwmgr/eventmgr backends.
 */
const struct amd_powerplay_funcs pp_dpm_funcs = {
	.get_temperature = pp_dpm_get_temperature,
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.print_current_performance_level = pp_debugfs_print_current_performance_level,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
};
551
/*
 * Allocate a pp_instance and bring up its three sub-managers in order:
 * SMU manager, hardware manager, event manager.  On failure the
 * already-initialized managers are torn down in reverse order via the
 * goto chain and the instance is freed.
 *
 * Returns 0 and stores the handle in amd_pp->pp_handle, or a negative
 * error code.
 */
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
		struct amd_powerplay *amd_pp)
{
	int ret;
	struct pp_instance *handle;

	handle = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	/* Magic checked later by PP_CHECK() to validate handles. */
	handle->pp_valid = PP_VALID;

	ret = smum_init(pp_init, handle);
	if (ret)
		goto fail_smum;

	ret = hwmgr_init(pp_init, handle);
	if (ret)
		goto fail_hwmgr;

	ret = eventmgr_init(handle);
	if (ret)
		goto fail_eventmgr;

	amd_pp->pp_handle = handle;
	return 0;

fail_eventmgr:
	hwmgr_fini(handle->hwmgr);
fail_hwmgr:
	smum_fini(handle->smu_mgr);
fail_smum:
	kfree(handle);
	return ret;
}
587
/*
 * Tear down a pp_instance: sub-managers are shut down in the reverse
 * order of amd_pp_instance_init (eventmgr, hwmgr, SMU), then the
 * instance itself is freed.  Returns -EINVAL for a NULL handle.
 */
static int amd_pp_instance_fini(void *handle)
{
	struct pp_instance *instance = (struct pp_instance *)handle;

	if (instance == NULL)
		return -EINVAL;

	eventmgr_fini(instance->eventmgr);

	hwmgr_fini(instance->hwmgr);

	smum_fini(instance->smu_mgr);

	kfree(handle);
	return 0;
}
604
605int amd_powerplay_init(struct amd_pp_init *pp_init,
606 struct amd_powerplay *amd_pp)
607{
608 int ret;
609
610 if (pp_init == NULL || amd_pp == NULL)
611 return -EINVAL;
612
613 ret = amd_pp_instance_init(pp_init, amd_pp);
614
615 if (ret)
616 return ret;
617
618 amd_pp->ip_funcs = &pp_ip_funcs;
619 amd_pp->pp_funcs = &pp_dpm_funcs;
620
621 return 0;
622}
623
/*
 * Public teardown counterpart of amd_powerplay_init().
 *
 * Improvement: propagate the instance tear-down status (-EINVAL for a
 * NULL handle) instead of discarding it and always returning 0.
 */
int amd_powerplay_fini(void *handle)
{
	return amd_pp_instance_fini(handle);
}
630
/* Exported to the display core (DAL). */

/*
 * Push a new display configuration into the hardware manager so it can
 * adjust power management accordingly.  Returns -EINVAL for an invalid
 * powerplay handle, otherwise 0.
 */
int amd_powerplay_display_configuration_change(void *handle, const void *input)
{
	struct pp_hwmgr *hwmgr;
	const struct amd_pp_display_configuration *display_config = input;

	PP_CHECK((struct pp_instance *)handle);

	hwmgr = ((struct pp_instance *)handle)->hwmgr;

	/* NOTE(review): the status of phm_store_dal_configuration_data
	 * is discarded here — confirm it cannot fail. */
	phm_store_dal_configuration_data(hwmgr, display_config);

	return 0;
}
646
647int amd_powerplay_get_display_power_level(void *handle,
648 struct amd_pp_dal_clock_info *output)
649{
650 struct pp_hwmgr *hwmgr;
651
652 PP_CHECK((struct pp_instance *)handle);
653
654 if (output == NULL)
655 return -EINVAL;
656
657 hwmgr = ((struct pp_instance *)handle)->hwmgr;
658
659 return phm_get_dal_power_level(hwmgr, output);
660}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
new file mode 100644
index 000000000000..7509e3850087
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for the 'event manager' sub-component of powerplay.
3# It provides the event management services for the driver.
4
5EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \
6 eventactionchains.o eventsubchains.o eventtasks.o psm.o
7
8AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR))
9
10AMD_POWERPLAY_FILES += $(AMD_PP_EVENT)
11
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
new file mode 100644
index 000000000000..83be3cf210e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -0,0 +1,289 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "eventmgr.h"
24#include "eventactionchains.h"
25#include "eventsubchains.h"
26
27static const pem_event_action *initialize_event[] = {
28 block_adjust_power_state_tasks,
29 power_budget_tasks,
30 system_config_tasks,
31 setup_asic_tasks,
32 enable_dynamic_state_management_tasks,
33 enable_clock_power_gatings_tasks,
34 get_2d_performance_state_tasks,
35 set_performance_state_tasks,
36 initialize_thermal_controller_tasks,
37 conditionally_force_3d_performance_state_tasks,
38 process_vbios_eventinfo_tasks,
39 broadcast_power_policy_tasks,
40 NULL
41};
42
43const struct action_chain initialize_action_chain = {
44 "Initialize",
45 initialize_event
46};
47
48static const pem_event_action *uninitialize_event[] = {
49 ungate_all_display_phys_tasks,
50 uninitialize_display_phy_access_tasks,
51 disable_gfx_voltage_island_power_gating_tasks,
52 disable_gfx_clock_gating_tasks,
53 set_boot_state_tasks,
54 adjust_power_state_tasks,
55 disable_dynamic_state_management_tasks,
56 disable_clock_power_gatings_tasks,
57 cleanup_asic_tasks,
58 prepare_for_pnp_stop_tasks,
59 NULL
60};
61
62const struct action_chain uninitialize_action_chain = {
63 "Uninitialize",
64 uninitialize_event
65};
66
67static const pem_event_action *power_source_change_event_pp_enabled[] = {
68 set_power_source_tasks,
69 set_power_saving_state_tasks,
70 adjust_power_state_tasks,
71 enable_disable_fps_tasks,
72 set_nbmcu_state_tasks,
73 broadcast_power_policy_tasks,
74 NULL
75};
76
77const struct action_chain power_source_change_action_chain_pp_enabled = {
78 "Power source change - PowerPlay enabled",
79 power_source_change_event_pp_enabled
80};
81
82static const pem_event_action *power_source_change_event_pp_disabled[] = {
83 set_power_source_tasks,
84 set_nbmcu_state_tasks,
85 NULL
86};
87
88const struct action_chain power_source_changes_action_chain_pp_disabled = {
89 "Power source change - PowerPlay disabled",
90 power_source_change_event_pp_disabled
91};
92
93static const pem_event_action *power_source_change_event_hardware_dc[] = {
94 set_power_source_tasks,
95 set_power_saving_state_tasks,
96 adjust_power_state_tasks,
97 enable_disable_fps_tasks,
98 reset_hardware_dc_notification_tasks,
99 set_nbmcu_state_tasks,
100 broadcast_power_policy_tasks,
101 NULL
102};
103
104const struct action_chain power_source_change_action_chain_hardware_dc = {
105 "Power source change - with Hardware DC switching",
106 power_source_change_event_hardware_dc
107};
108
109static const pem_event_action *suspend_event[] = {
110 reset_display_phy_access_tasks,
111 unregister_interrupt_tasks,
112 disable_gfx_voltage_island_power_gating_tasks,
113 disable_gfx_clock_gating_tasks,
114 notify_smu_suspend_tasks,
115 disable_smc_firmware_ctf_tasks,
116 set_boot_state_tasks,
117 adjust_power_state_tasks,
118 disable_fps_tasks,
119 vari_bright_suspend_tasks,
120 reset_fan_speed_to_default_tasks,
121 power_down_asic_tasks,
122 disable_stutter_mode_tasks,
123 set_connected_standby_tasks,
124 block_hw_access_tasks,
125 NULL
126};
127
128const struct action_chain suspend_action_chain = {
129 "Suspend",
130 suspend_event
131};
132
133static const pem_event_action *resume_event[] = {
134 unblock_hw_access_tasks,
135 resume_connected_standby_tasks,
136 notify_smu_resume_tasks,
137 reset_display_configCounter_tasks,
138 update_dal_configuration_tasks,
139 vari_bright_resume_tasks,
140 block_adjust_power_state_tasks,
141 setup_asic_tasks,
142 enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
143 enable_dynamic_state_management_tasks,
144 enable_clock_power_gatings_tasks,
145 enable_disable_bapm_tasks,
146 initialize_thermal_controller_tasks,
147 reset_boot_state_tasks,
148 adjust_power_state_tasks,
149 enable_disable_fps_tasks,
150 notify_hw_power_source_tasks,
151 process_vbios_event_info_tasks,
152 enable_gfx_clock_gating_tasks,
153 enable_gfx_voltage_island_power_gating_tasks,
154 reset_clock_gating_tasks,
155 notify_smu_vpu_recovery_end_tasks,
156 disable_vpu_cap_tasks,
157 execute_escape_sequence_tasks,
158 NULL
159};
160
161
162const struct action_chain resume_action_chain = {
163 "resume",
164 resume_event
165};
166
167static const pem_event_action *complete_init_event[] = {
168 adjust_power_state_tasks,
169 enable_gfx_clock_gating_tasks,
170 enable_gfx_voltage_island_power_gating_tasks,
171 notify_power_state_change_tasks,
172 NULL
173};
174
175const struct action_chain complete_init_action_chain = {
176 "complete init",
177 complete_init_event
178};
179
180static const pem_event_action *enable_gfx_clock_gating_event[] = {
181 enable_gfx_clock_gating_tasks,
182 NULL
183};
184
185const struct action_chain enable_gfx_clock_gating_action_chain = {
186 "enable gfx clock gate",
187 enable_gfx_clock_gating_event
188};
189
190static const pem_event_action *disable_gfx_clock_gating_event[] = {
191 disable_gfx_clock_gating_tasks,
192 NULL
193};
194
195const struct action_chain disable_gfx_clock_gating_action_chain = {
196 "disable gfx clock gate",
197 disable_gfx_clock_gating_event
198};
199
200static const pem_event_action *enable_cgpg_event[] = {
201 enable_cgpg_tasks,
202 NULL
203};
204
205const struct action_chain enable_cgpg_action_chain = {
206 "eable cg pg",
207 enable_cgpg_event
208};
209
210static const pem_event_action *disable_cgpg_event[] = {
211 disable_cgpg_tasks,
212 NULL
213};
214
215const struct action_chain disable_cgpg_action_chain = {
216 "disable cg pg",
217 disable_cgpg_event
218};
219
220
221/* Enable user _2d performance and activate */
222
223static const pem_event_action *enable_user_state_event[] = {
224 create_new_user_performance_state_tasks,
225 adjust_power_state_tasks,
226 NULL
227};
228
229const struct action_chain enable_user_state_action_chain = {
230 "Enable user state",
231 enable_user_state_event
232};
233
234static const pem_event_action *enable_user_2d_performance_event[] = {
235 enable_user_2d_performance_tasks,
236 add_user_2d_performance_state_tasks,
237 set_performance_state_tasks,
238 adjust_power_state_tasks,
239 delete_user_2d_performance_state_tasks,
240 NULL
241};
242
243const struct action_chain enable_user_2d_performance_action_chain = {
244 "enable_user_2d_performance_event_activate",
245 enable_user_2d_performance_event
246};
247
248
249static const pem_event_action *disable_user_2d_performance_event[] = {
250 disable_user_2d_performance_tasks,
251 delete_user_2d_performance_state_tasks,
252 NULL
253};
254
255const struct action_chain disable_user_2d_performance_action_chain = {
256 "disable_user_2d_performance_event",
257 disable_user_2d_performance_event
258};
259
260
261static const pem_event_action *display_config_change_event[] = {
262 /* countDisplayConfigurationChangeEventTasks, */
263 unblock_adjust_power_state_tasks,
264 set_cpu_power_state,
265 notify_hw_power_source_tasks,
266 /* updateDALConfigurationTasks,
267 variBrightDisplayConfigurationChangeTasks, */
268 adjust_power_state_tasks,
269 /*enableDisableFPSTasks,
270 setNBMCUStateTasks,
271 notifyPCIEDeviceReadyTasks,*/
272 NULL
273};
274
275const struct action_chain display_config_change_action_chain = {
276 "Display configuration change",
277 display_config_change_event
278};
279
280static const pem_event_action *readjust_power_state_event[] = {
281 adjust_power_state_tasks,
282 NULL
283};
284
285const struct action_chain readjust_power_state_action_chain = {
286 "re-adjust power state",
287 readjust_power_state_event
288};
289
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
new file mode 100644
index 000000000000..f181e53cdcda
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _EVENT_ACTION_CHAINS_H_
24#define _EVENT_ACTION_CHAINS_H_
25#include "eventmgr.h"
26
27extern const struct action_chain initialize_action_chain;
28
29extern const struct action_chain uninitialize_action_chain;
30
31extern const struct action_chain power_source_change_action_chain_pp_enabled;
32
33extern const struct action_chain power_source_changes_action_chain_pp_disabled;
34
35extern const struct action_chain power_source_change_action_chain_hardware_dc;
36
37extern const struct action_chain suspend_action_chain;
38
39extern const struct action_chain resume_action_chain;
40
41extern const struct action_chain complete_init_action_chain;
42
43extern const struct action_chain enable_gfx_clock_gating_action_chain;
44
45extern const struct action_chain disable_gfx_clock_gating_action_chain;
46
47extern const struct action_chain enable_cgpg_action_chain;
48
49extern const struct action_chain disable_cgpg_action_chain;
50
51extern const struct action_chain enable_user_2d_performance_action_chain;
52
53extern const struct action_chain disable_user_2d_performance_action_chain;
54
55extern const struct action_chain enable_user_state_action_chain;
56
57extern const struct action_chain readjust_power_state_action_chain;
58
59extern const struct action_chain display_config_change_action_chain;
60
61#endif /*_EVENT_ACTION_CHAINS_H_*/
62
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
new file mode 100644
index 000000000000..d5ec8ccbe97d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
@@ -0,0 +1,195 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "eventmgr.h"
24#include "eventinit.h"
25#include "ppinterrupt.h"
26#include "hardwaremanager.h"
27
28void pem_init_feature_info(struct pp_eventmgr *eventmgr)
29{
30
31 /* PowerPlay info */
32 eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable =
33 PP_StateUILabel_Performance;
34
35 eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label =
36 PP_StateUILabel_Performance;
37
38 eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable =
39 PP_StateUILabel_Battery;
40
41 eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label =
42 PP_StateUILabel_Battery;
43
44 if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) {
45 eventmgr->features[PP_Feature_PowerPlay].supported = true;
46 eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION;
47 eventmgr->features[PP_Feature_PowerPlay].enabled_default = true;
48 eventmgr->features[PP_Feature_PowerPlay].enabled = true;
49 } else {
50 eventmgr->features[PP_Feature_PowerPlay].supported = false;
51 eventmgr->features[PP_Feature_PowerPlay].enabled = false;
52 eventmgr->features[PP_Feature_PowerPlay].enabled_default = false;
53 }
54
55 eventmgr->features[PP_Feature_Force3DClock].supported = true;
56 eventmgr->features[PP_Feature_Force3DClock].enabled = false;
57 eventmgr->features[PP_Feature_Force3DClock].enabled_default = false;
58 eventmgr->features[PP_Feature_Force3DClock].version = 1;
59
60 /* over drive*/
61 eventmgr->features[PP_Feature_User2DPerformance].version = 4;
62 eventmgr->features[PP_Feature_User3DPerformance].version = 4;
63 eventmgr->features[PP_Feature_OverdriveTest].version = 4;
64
65 eventmgr->features[PP_Feature_OverDrive].version = 4;
66 eventmgr->features[PP_Feature_OverDrive].enabled = false;
67 eventmgr->features[PP_Feature_OverDrive].enabled_default = false;
68
69 eventmgr->features[PP_Feature_User2DPerformance].supported = false;
70 eventmgr->features[PP_Feature_User2DPerformance].enabled = false;
71 eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false;
72
73 eventmgr->features[PP_Feature_User3DPerformance].supported = false;
74 eventmgr->features[PP_Feature_User3DPerformance].enabled = false;
75 eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false;
76
77 eventmgr->features[PP_Feature_OverdriveTest].supported = false;
78 eventmgr->features[PP_Feature_OverdriveTest].enabled = false;
79 eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false;
80
81 eventmgr->features[PP_Feature_OverDrive].supported = false;
82
83 eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false;
84 eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1;
85 eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
86 eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false;
87
88 /* Multi UVD States support */
89 eventmgr->features[PP_Feature_MultiUVDState].supported = false;
90 eventmgr->features[PP_Feature_MultiUVDState].enabled = false;
91 eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false;
92
93 /* Dynamic UVD States support */
94 eventmgr->features[PP_Feature_DynamicUVDState].supported = false;
95 eventmgr->features[PP_Feature_DynamicUVDState].enabled = false;
96 eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false;
97
98 /* VCE DPM support */
99 eventmgr->features[PP_Feature_VCEDPM].supported = false;
100 eventmgr->features[PP_Feature_VCEDPM].enabled = false;
101 eventmgr->features[PP_Feature_VCEDPM].enabled_default = false;
102
103 /* ACP PowerGating support */
104 eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false;
105 eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false;
106 eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false;
107
108 /* PPM support */
109 eventmgr->features[PP_Feature_PPM].version = 1;
110 eventmgr->features[PP_Feature_PPM].supported = false;
111 eventmgr->features[PP_Feature_PPM].enabled = false;
112
113 /* FFC support (enables fan and temp settings, Gemini needs temp settings) */
114 if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) ||
115 phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) {
116 eventmgr->features[PP_Feature_FFC].version = 1;
117 eventmgr->features[PP_Feature_FFC].supported = true;
118 eventmgr->features[PP_Feature_FFC].enabled = true;
119 eventmgr->features[PP_Feature_FFC].enabled_default = true;
120 } else {
121 eventmgr->features[PP_Feature_FFC].supported = false;
122 eventmgr->features[PP_Feature_FFC].enabled = false;
123 eventmgr->features[PP_Feature_FFC].enabled_default = false;
124 }
125
126 eventmgr->features[PP_Feature_VariBright].supported = false;
127 eventmgr->features[PP_Feature_VariBright].enabled = false;
128 eventmgr->features[PP_Feature_VariBright].enabled_default = false;
129
130 eventmgr->features[PP_Feature_BACO].supported = false;
131 eventmgr->features[PP_Feature_BACO].supported = false;
132 eventmgr->features[PP_Feature_BACO].enabled_default = false;
133
134 /* PowerDown feature support */
135 eventmgr->features[PP_Feature_PowerDown].supported = false;
136 eventmgr->features[PP_Feature_PowerDown].enabled = false;
137 eventmgr->features[PP_Feature_PowerDown].enabled_default = false;
138
139 eventmgr->features[PP_Feature_FPS].version = 1;
140 eventmgr->features[PP_Feature_FPS].supported = false;
141 eventmgr->features[PP_Feature_FPS].enabled_default = false;
142 eventmgr->features[PP_Feature_FPS].enabled = false;
143
144 eventmgr->features[PP_Feature_ViPG].version = 1;
145 eventmgr->features[PP_Feature_ViPG].supported = false;
146 eventmgr->features[PP_Feature_ViPG].enabled_default = false;
147 eventmgr->features[PP_Feature_ViPG].enabled = false;
148}
149
/*
 * Interrupt handler invoked when the GPU temperature leaves its allowed
 * range.  Currently log-only.
 *
 * Fix: the original bare printk() had no log level; use pr_warn() so the
 * message is emitted at the intended severity.
 */
static int thermal_interrupt_callback(void *private_data,
		unsigned src_id, const uint32_t *iv_entry)
{
	/* TODO: handle PEM_Event_ThermalNotification using
	 * (struct pp_eventmgr *)private_data */
	pr_warn("current thermal is out of range \n");
	return 0;
}
157
158int pem_register_interrupts(struct pp_eventmgr *eventmgr)
159{
160 int result = 0;
161 struct pp_interrupt_registration_info info;
162
163 info.call_back = thermal_interrupt_callback;
164 info.context = eventmgr;
165
166 result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info);
167
168 /* TODO:
169 * 2. Register CTF event interrupt
170 * 3. Register for vbios events interrupt
171 * 4. Register External Throttle Interrupt
172 * 5. Register Smc To Host Interrupt
173 * */
174 return result;
175}
176
177
/*
 * Counterpart of pem_register_interrupts().  Currently a stub — no
 * unregister path exists yet — so it always reports success.
 */
int pem_unregister_interrupts(struct pp_eventmgr *eventmgr)
{
	return 0;
}
182
183
184void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr)
185{
186 eventmgr->features[PP_Feature_MultiUVDState].supported = false;
187 eventmgr->features[PP_Feature_VariBright].supported = false;
188 eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
189 eventmgr->features[PP_Feature_OverDrive].supported = false;
190 eventmgr->features[PP_Feature_OverdriveTest].supported = false;
191 eventmgr->features[PP_Feature_User3DPerformance].supported = false;
192 eventmgr->features[PP_Feature_User2DPerformance].supported = false;
193 eventmgr->features[PP_Feature_PowerPlay].supported = false;
194 eventmgr->features[PP_Feature_Force3DClock].supported = false;
195}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h
new file mode 100644
index 000000000000..9ef96aab3f24
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef _EVENTINIT_H_
#define _EVENTINIT_H_

/* Interface version reported for the PowerPlay core feature. */
#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4

/* Populate / clear the event manager's feature table. */
void pem_init_feature_info(struct pp_eventmgr *eventmgr);
void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
/* Register / unregister interrupt callbacks; return 0 on success. */
int pem_register_interrupts(struct pp_eventmgr *eventmgr);
int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);

#endif /* _EVENTINIT_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
new file mode 100644
index 000000000000..1e2ad5603080
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "eventmanagement.h"
24#include "eventmgr.h"
25#include "eventactionchains.h"
26
27int pem_init_event_action_chains(struct pp_eventmgr *eventmgr)
28{
29 int i;
30
31 for (i = 0; i < AMD_PP_EVENT_MAX; i++)
32 eventmgr->event_chain[i] = NULL;
33
34 eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr);
35 eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr);
36 eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr);
37 eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr);
38 eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr);
39 eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr);
40 eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr);
41 eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr);
42 eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr);
43 eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr);
44 eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr);
45 eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr);
46 eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr);
47 eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr);
48 eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr);
49 eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr);
50 eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr);
51 eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr);
52 eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr);
53 eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr);
54 eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr);
55 eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr);
56 eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr);
57 eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr);
58 eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr);
59 eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr);
60 return 0;
61}
62
/*
 * Run every task of every sub-chain of @event_chain.
 *
 * An action_chain is a NULL-terminated list of sub-chains, each of which
 * is itself a NULL-terminated list of pem_event_action tasks.  The first
 * non-zero task result is latched into @result; the remaining tasks of
 * the *current* sub-chain still run, and execution stops before the
 * *next* sub-chain starts (the result check at the top of the outer
 * loop).  Do not "simplify" this ordering — it is load-bearing.
 *
 * Returns 0 on success, -EINVAL on NULL arguments, otherwise the first
 * failing task's return code.
 */
int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
{
	const pem_event_action **paction_chain;
	const pem_event_action *psub_chain;
	int tmp_result = 0;
	int result = 0;

	if (eventmgr == NULL || event_chain == NULL || event_data == NULL)
		return -EINVAL;

	for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) {
		/* A failure in the previous sub-chain aborts the rest. */
		if (0 != result)
			return result;

		for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) {
			tmp_result = (*psub_chain)(eventmgr, event_data);
			/* Keep only the first failure; later tasks still run. */
			if (0 == result)
				result = tmp_result;
		}
	}

	return result;
}
86
/*
 * Per-event action-chain lookup helpers.
 *
 * Each returns the statically defined chain (from eventactionchains.c)
 * for one AMD_PP_EVENT_* id, or NULL when the event has no actions
 * wired up yet.  The @eventmgr argument is currently unused by every
 * getter; it is kept so a getter can later select a chain based on
 * runtime state (see the power-source getter below).
 */
const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr)
{
	return &suspend_action_chain;
}

const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr)
{
	return &initialize_action_chain;
}

const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr)
{
	return &uninitialize_action_chain;
}

const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr)
{
	/* TODO: select the pp_disabled / hardware_dc variants based on
	 * the feature info instead of always assuming PP enabled. */
	return &power_source_change_action_chain_pp_enabled;
}

const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr)
{
	return &resume_action_chain;
}

const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
{
	return &enable_gfx_clock_gating_action_chain;
}

const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
{
	return &disable_gfx_clock_gating_action_chain;
}

const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
{
	return &enable_cgpg_action_chain;
}

const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
{
	return &disable_cgpg_action_chain;
}

const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr)
{
	return &complete_init_action_chain;
}

const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr)
{
	return NULL;
}

const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr)
{
	return &enable_user_state_action_chain;
}

const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr)
{
	return &readjust_power_state_action_chain;
}

const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr)
{
	return &display_config_change_action_chain;
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
new file mode 100644
index 000000000000..383d4b295aa9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
#ifndef _EVENT_MANAGEMENT_H_
#define _EVENT_MANAGEMENT_H_

#include "eventmgr.h"

/* Build the event -> action-chain lookup table inside @eventmgr. */
int pem_init_event_action_chains(struct pp_eventmgr *eventmgr);
/* Run all tasks of @event_chain; returns 0 or the first failing task's code. */
int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data);
/*
 * Chain getters: each returns the chain bound to one event, or NULL when
 * that event currently has no actions.
 */
const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr);

/* NOTE(review): these three drop the "get_" prefix — naming inconsistency. */
extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr);
extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr);


#endif /* _EVENT_MANAGEMENT_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
new file mode 100644
index 000000000000..52a3efc97f05
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include "eventmgr.h"
27#include "hwmgr.h"
28#include "eventinit.h"
29#include "eventmanagement.h"
30
31static int pem_init(struct pp_eventmgr *eventmgr)
32{
33 int result = 0;
34 struct pem_event_data event_data;
35
36 /* Initialize PowerPlay feature info */
37 pem_init_feature_info(eventmgr);
38
39 /* Initialize event action chains */
40 pem_init_event_action_chains(eventmgr);
41
42 /* Call initialization event */
43 result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data);
44
45 if (0 != result)
46 return result;
47
48 /* Register interrupt callback functions */
49 result = pem_register_interrupts(eventmgr);
50 return 0;
51}
52
53static void pem_fini(struct pp_eventmgr *eventmgr)
54{
55 struct pem_event_data event_data;
56
57 pem_uninit_featureInfo(eventmgr);
58 pem_unregister_interrupts(eventmgr);
59
60 pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
61
62 if (eventmgr != NULL)
63 kfree(eventmgr);
64}
65
66int eventmgr_init(struct pp_instance *handle)
67{
68 int result = 0;
69 struct pp_eventmgr *eventmgr;
70
71 if (handle == NULL)
72 return -EINVAL;
73
74 eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL);
75 if (eventmgr == NULL)
76 return -ENOMEM;
77
78 eventmgr->hwmgr = handle->hwmgr;
79 handle->eventmgr = eventmgr;
80
81 eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor);
82 eventmgr->pp_eventmgr_init = pem_init;
83 eventmgr->pp_eventmgr_fini = pem_fini;
84
85 return result;
86}
87
/*
 * Free the event manager allocated by eventmgr_init().  Always returns 0.
 * NOTE(review): pem_fini() also kfree()s the eventmgr; callers must not
 * run both on the same instance — confirm at the call sites.
 */
int eventmgr_fini(struct pp_eventmgr *eventmgr)
{
	kfree(eventmgr);
	return 0;
}
93
/*
 * Validate the arguments and execute the action chain registered for
 * @event.  Returns -EINVAL on bad arguments, otherwise the chain result.
 */
static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data)
{
	if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL)
		return -EINVAL;

	return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data);
}
101
102int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data)
103{
104 int r = 0;
105
106 r = pem_handle_event_unlocked(eventmgr, event, event_data);
107
108 return r;
109}
110
/*
 * True when power-state adjustment must not touch the hardware: either
 * the event manager itself blocked adjustments or the hwmgr reports
 * hardware access blocked.  Short-circuit order is kept as written.
 */
bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr)
{
	return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr));
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
new file mode 100644
index 000000000000..9ef2d90e2886
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
@@ -0,0 +1,410 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "eventmgr.h"
25#include "eventsubchains.h"
26#include "eventtasks.h"
27#include "hardwaremanager.h"
28
/*
 * Event sub-chains: each array below is a NULL-terminated, ordered list
 * of task callbacks that the event manager executes when the owning
 * event fires.  Commented-out entries are placeholders for tasks that
 * have not been ported yet.
 */
const pem_event_action reset_display_phy_access_tasks[] = {
	pem_task_reset_display_phys_access,
	NULL
};

const pem_event_action broadcast_power_policy_tasks[] = {
	/* PEM_Task_BroadcastPowerPolicyChange, */
	NULL
};

const pem_event_action unregister_interrupt_tasks[] = {
	pem_task_unregister_interrupts,
	NULL
};

/* Disable GFX Voltage Islands Power Gating */
const pem_event_action disable_gfx_voltage_island_powergating_tasks[] = {
	pem_task_disable_voltage_island_power_gating,
	NULL
};

const pem_event_action disable_gfx_clockgating_tasks[] = {
	pem_task_disable_gfx_clock_gating,
	NULL
};

const pem_event_action block_adjust_power_state_tasks[] = {
	pem_task_block_adjust_power_state,
	NULL
};


const pem_event_action unblock_adjust_power_state_tasks[] = {
	pem_task_unblock_adjust_power_state,
	NULL
};

const pem_event_action set_performance_state_tasks[] = {
	pem_task_set_performance_state,
	NULL
};

const pem_event_action get_2d_performance_state_tasks[] = {
	pem_task_get_2D_performance_state_id,
	NULL
};

const pem_event_action conditionally_force3D_performance_state_tasks[] = {
	pem_task_conditionally_force_3d_performance_state,
	NULL
};

const pem_event_action process_vbios_eventinfo_tasks[] = {
	/* PEM_Task_ProcessVbiosEventInfo,*/
	NULL
};

/* Order matters: the boot state id must exist before enabling DSM. */
const pem_event_action enable_dynamic_state_management_tasks[] = {
	/* PEM_Task_ResetBAPMPolicyChangedFlag,*/
	pem_task_get_boot_state_id,
	pem_task_enable_dynamic_state_management,
	pem_task_register_interrupts,
	NULL
};

const pem_event_action enable_clock_power_gatings_tasks[] = {
	pem_task_enable_clock_power_gatings_tasks,
	pem_task_powerdown_uvd_tasks,
	pem_task_powerdown_vce_tasks,
	NULL
};

const pem_event_action setup_asic_tasks[] = {
	pem_task_setup_asic,
	NULL
};

const pem_event_action power_budget_tasks[] = {
	/* TODO
	 * PEM_Task_PowerBudgetWaiverAvailable,
	 * PEM_Task_PowerBudgetWarningMessage,
	 * PEM_Task_PruneStatesBasedOnPowerBudget,
	 */
	NULL
};

const pem_event_action system_config_tasks[] = {
	/* PEM_Task_PruneStatesBasedOnSystemConfig,*/
	NULL
};


const pem_event_action conditionally_force_3d_performance_state_tasks[] = {
	pem_task_conditionally_force_3d_performance_state,
	NULL
};

const pem_event_action ungate_all_display_phys_tasks[] = {
	/* PEM_Task_GetDisplayPhyAccessInfo */
	NULL
};

const pem_event_action uninitialize_display_phy_access_tasks[] = {
	/* PEM_Task_UninitializeDisplayPhysAccess, */
	NULL
};

const pem_event_action disable_gfx_voltage_island_power_gating_tasks[] = {
	/* PEM_Task_DisableVoltageIslandPowerGating, */
	NULL
};

const pem_event_action disable_gfx_clock_gating_tasks[] = {
	pem_task_disable_gfx_clock_gating,
	NULL
};

const pem_event_action set_boot_state_tasks[] = {
	pem_task_get_boot_state_id,
	pem_task_set_boot_state,
	NULL
};

const pem_event_action adjust_power_state_tasks[] = {
	pem_task_notify_hw_mgr_display_configuration_change,
	pem_task_adjust_power_state,
	pem_task_notify_smc_display_config_after_power_state_adjustment,
	pem_task_update_allowed_performance_levels,
	/* to do pem_task_Enable_disable_bapm, */
	NULL
};

const pem_event_action disable_dynamic_state_management_tasks[] = {
	pem_task_unregister_interrupts,
	pem_task_get_boot_state_id,
	pem_task_disable_dynamic_state_management,
	NULL
};

const pem_event_action disable_clock_power_gatings_tasks[] = {
	pem_task_disable_clock_power_gatings_tasks,
	NULL
};

const pem_event_action cleanup_asic_tasks[] = {
	/* PEM_Task_DisableFPS,*/
	pem_task_cleanup_asic,
	NULL
};

const pem_event_action prepare_for_pnp_stop_tasks[] = {
	/* PEM_Task_PrepareForPnpStop,*/
	NULL
};

const pem_event_action set_power_source_tasks[] = {
	pem_task_set_power_source,
	pem_task_notify_hw_of_power_source,
	NULL
};

const pem_event_action set_power_saving_state_tasks[] = {
	pem_task_reset_power_saving_state,
	pem_task_get_power_saving_state,
	pem_task_set_power_saving_state,
	/* PEM_Task_ResetODDCState,
	 * PEM_Task_GetODDCState,
	 * PEM_Task_SetODDCState,*/
	NULL
};

const pem_event_action enable_disable_fps_tasks[] = {
	/* PEM_Task_EnableDisableFPS,*/
	NULL
};

const pem_event_action set_nbmcu_state_tasks[] = {
	/* PEM_Task_NBMCUStateChange,*/
	NULL
};

const pem_event_action reset_hardware_dc_notification_tasks[] = {
	/* PEM_Task_ResetHardwareDCNotification,*/
	NULL
};


const pem_event_action notify_smu_suspend_tasks[] = {
	/* PEM_Task_NotifySMUSuspend,*/
	NULL
};

const pem_event_action disable_smc_firmware_ctf_tasks[] = {
	/* PEM_Task_DisableSMCFirmwareCTF,*/
	NULL
};

const pem_event_action disable_fps_tasks[] = {
	/* PEM_Task_DisableFPS,*/
	NULL
};

const pem_event_action vari_bright_suspend_tasks[] = {
	/* PEM_Task_VariBright_Suspend,*/
	NULL
};

const pem_event_action reset_fan_speed_to_default_tasks[] = {
	/* PEM_Task_ResetFanSpeedToDefault,*/
	NULL
};

const pem_event_action power_down_asic_tasks[] = {
	/* PEM_Task_DisableFPS,*/
	pem_task_power_down_asic,
	NULL
};

const pem_event_action disable_stutter_mode_tasks[] = {
	/* PEM_Task_DisableStutterMode,*/
	NULL
};

const pem_event_action set_connected_standby_tasks[] = {
	/* PEM_Task_SetConnectedStandby,*/
	NULL
};

const pem_event_action block_hw_access_tasks[] = {
	pem_task_block_hw_access,
	NULL
};

const pem_event_action unblock_hw_access_tasks[] = {
	pem_task_un_block_hw_access,
	NULL
};

const pem_event_action resume_connected_standby_tasks[] = {
	/* PEM_Task_ResumeConnectedStandby,*/
	NULL
};

const pem_event_action notify_smu_resume_tasks[] = {
	/* PEM_Task_NotifySMUResume,*/
	NULL
};

const pem_event_action reset_display_configCounter_tasks[] = {
	pem_task_reset_display_phys_access,
	NULL
};

const pem_event_action update_dal_configuration_tasks[] = {
	/* PEM_Task_CheckVBlankTime,*/
	NULL
};

const pem_event_action vari_bright_resume_tasks[] = {
	/* PEM_Task_VariBright_Resume,*/
	NULL
};

const pem_event_action notify_hw_power_source_tasks[] = {
	pem_task_notify_hw_of_power_source,
	NULL
};

const pem_event_action process_vbios_event_info_tasks[] = {
	/* PEM_Task_ProcessVbiosEventInfo,*/
	NULL
};

const pem_event_action enable_gfx_clock_gating_tasks[] = {
	pem_task_enable_gfx_clock_gating,
	NULL
};

const pem_event_action enable_gfx_voltage_island_power_gating_tasks[] = {
	pem_task_enable_voltage_island_power_gating,
	NULL
};

const pem_event_action reset_clock_gating_tasks[] = {
	/* PEM_Task_ResetClockGating*/
	NULL
};

const pem_event_action notify_smu_vpu_recovery_end_tasks[] = {
	/* PEM_Task_NotifySmuVPURecoveryEnd,*/
	NULL
};

const pem_event_action disable_vpu_cap_tasks[] = {
	/* PEM_Task_DisableVPUCap,*/
	NULL
};

const pem_event_action execute_escape_sequence_tasks[] = {
	/* PEM_Task_ExecuteEscapesequence,*/
	NULL
};

const pem_event_action notify_power_state_change_tasks[] = {
	pem_task_notify_power_state_change,
	NULL
};

const pem_event_action enable_cgpg_tasks[] = {
	pem_task_enable_cgpg,
	NULL
};

const pem_event_action disable_cgpg_tasks[] = {
	pem_task_disable_cgpg,
	NULL
};

const pem_event_action enable_user_2d_performance_tasks[] = {
	/* PEM_Task_SetUser2DPerformanceFlag,*/
	/* PEM_Task_UpdateUser2DPerformanceEnableEvents,*/
	NULL
};

const pem_event_action add_user_2d_performance_state_tasks[] = {
	/* PEM_Task_Get2DPerformanceTemplate,*/
	/* PEM_Task_AllocateNewPowerStateMemory,*/
	/* PEM_Task_CopyNewPowerStateInfo,*/
	/* PEM_Task_UpdateNewPowerStateClocks,*/
	/* PEM_Task_UpdateNewPowerStateUser2DPerformanceFlag,*/
	/* PEM_Task_AddPowerState,*/
	/* PEM_Task_ReleaseNewPowerStateMemory,*/
	NULL
};

const pem_event_action delete_user_2d_performance_state_tasks[] = {
	/* PEM_Task_GetCurrentUser2DPerformanceStateID,*/
	/* PEM_Task_DeletePowerState,*/
	/* PEM_Task_SetCurrentUser2DPerformanceStateID,*/
	NULL
};

const pem_event_action disable_user_2d_performance_tasks[] = {
	/* PEM_Task_ResetUser2DPerformanceFlag,*/
	/* PEM_Task_UpdateUser2DPerformanceDisableEvents,*/
	NULL
};

const pem_event_action enable_stutter_mode_tasks[] = {
	pem_task_enable_stutter_mode,
	NULL
};

const pem_event_action enable_disable_bapm_tasks[] = {
	/*PEM_Task_EnableDisableBAPM,*/
	NULL
};

const pem_event_action reset_boot_state_tasks[] = {
	pem_task_reset_boot_state,
	NULL
};

const pem_event_action create_new_user_performance_state_tasks[] = {
	pem_task_create_user_performance_state,
	NULL
};

const pem_event_action initialize_thermal_controller_tasks[] = {
	pem_task_initialize_thermal_controller,
	NULL
};

const pem_event_action uninitialize_thermal_controller_tasks[] = {
	pem_task_uninitialize_thermal_controller,
	NULL
};
406
407const pem_event_action set_cpu_power_state[] = {
408 pem_task_set_cpu_power_state,
409 NULL
410}; \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
new file mode 100644
index 000000000000..7714cb927428
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _EVENT_SUB_CHAINS_H_
25#define _EVENT_SUB_CHAINS_H_
26
27#include "eventmgr.h"
28
29extern const pem_event_action reset_display_phy_access_tasks[];
30extern const pem_event_action broadcast_power_policy_tasks[];
31extern const pem_event_action unregister_interrupt_tasks[];
32extern const pem_event_action disable_GFX_voltage_island_powergating_tasks[];
33extern const pem_event_action disable_GFX_clockgating_tasks[];
34extern const pem_event_action block_adjust_power_state_tasks[];
35extern const pem_event_action unblock_adjust_power_state_tasks[];
36extern const pem_event_action set_performance_state_tasks[];
37extern const pem_event_action get_2D_performance_state_tasks[];
38extern const pem_event_action conditionally_force3D_performance_state_tasks[];
39extern const pem_event_action process_vbios_eventinfo_tasks[];
40extern const pem_event_action enable_dynamic_state_management_tasks[];
41extern const pem_event_action enable_clock_power_gatings_tasks[];
42extern const pem_event_action conditionally_force3D_performance_state_tasks[];
43extern const pem_event_action setup_asic_tasks[];
44extern const pem_event_action power_budget_tasks[];
45extern const pem_event_action system_config_tasks[];
46extern const pem_event_action get_2d_performance_state_tasks[];
47extern const pem_event_action conditionally_force_3d_performance_state_tasks[];
48extern const pem_event_action ungate_all_display_phys_tasks[];
49extern const pem_event_action uninitialize_display_phy_access_tasks[];
50extern const pem_event_action disable_gfx_voltage_island_power_gating_tasks[];
51extern const pem_event_action disable_gfx_clock_gating_tasks[];
52extern const pem_event_action set_boot_state_tasks[];
53extern const pem_event_action adjust_power_state_tasks[];
54extern const pem_event_action disable_dynamic_state_management_tasks[];
55extern const pem_event_action disable_clock_power_gatings_tasks[];
56extern const pem_event_action cleanup_asic_tasks[];
57extern const pem_event_action prepare_for_pnp_stop_tasks[];
58extern const pem_event_action set_power_source_tasks[];
59extern const pem_event_action set_power_saving_state_tasks[];
60extern const pem_event_action enable_disable_fps_tasks[];
61extern const pem_event_action set_nbmcu_state_tasks[];
62extern const pem_event_action reset_hardware_dc_notification_tasks[];
63extern const pem_event_action notify_smu_suspend_tasks[];
64extern const pem_event_action disable_smc_firmware_ctf_tasks[];
65extern const pem_event_action disable_fps_tasks[];
66extern const pem_event_action vari_bright_suspend_tasks[];
67extern const pem_event_action reset_fan_speed_to_default_tasks[];
68extern const pem_event_action power_down_asic_tasks[];
69extern const pem_event_action disable_stutter_mode_tasks[];
70extern const pem_event_action set_connected_standby_tasks[];
71extern const pem_event_action block_hw_access_tasks[];
72extern const pem_event_action unblock_hw_access_tasks[];
73extern const pem_event_action resume_connected_standby_tasks[];
74extern const pem_event_action notify_smu_resume_tasks[];
75extern const pem_event_action reset_display_configCounter_tasks[];
76extern const pem_event_action update_dal_configuration_tasks[];
77extern const pem_event_action vari_bright_resume_tasks[];
78extern const pem_event_action notify_hw_power_source_tasks[];
79extern const pem_event_action process_vbios_event_info_tasks[];
80extern const pem_event_action enable_gfx_clock_gating_tasks[];
81extern const pem_event_action enable_gfx_voltage_island_power_gating_tasks[];
82extern const pem_event_action reset_clock_gating_tasks[];
83extern const pem_event_action notify_smu_vpu_recovery_end_tasks[];
84extern const pem_event_action disable_vpu_cap_tasks[];
85extern const pem_event_action execute_escape_sequence_tasks[];
86extern const pem_event_action notify_power_state_change_tasks[];
87extern const pem_event_action enable_cgpg_tasks[];
88extern const pem_event_action disable_cgpg_tasks[];
89extern const pem_event_action enable_user_2d_performance_tasks[];
90extern const pem_event_action add_user_2d_performance_state_tasks[];
91extern const pem_event_action delete_user_2d_performance_state_tasks[];
92extern const pem_event_action disable_user_2d_performance_tasks[];
93extern const pem_event_action enable_stutter_mode_tasks[];
94extern const pem_event_action enable_disable_bapm_tasks[];
95extern const pem_event_action reset_boot_state_tasks[];
96extern const pem_event_action create_new_user_performance_state_tasks[];
97extern const pem_event_action initialize_thermal_controller_tasks[];
98extern const pem_event_action uninitialize_thermal_controller_tasks[];
99extern const pem_event_action set_cpu_power_state[];
100#endif /* _EVENT_SUB_CHAINS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
new file mode 100644
index 000000000000..5cd123472db4
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -0,0 +1,438 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "eventmgr.h"
25#include "eventinit.h"
26#include "eventmanagement.h"
27#include "eventmanager.h"
28#include "hardwaremanager.h"
29#include "eventtasks.h"
30#include "power_state.h"
31#include "hwmgr.h"
32#include "amd_powerplay.h"
33#include "psm.h"
34
35#define TEMP_RANGE_MIN (90 * 1000)
36#define TEMP_RANGE_MAX (120 * 1000)
37
38int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
39{
40
41 if (pem_is_hw_access_blocked(eventmgr))
42 return 0;
43
44 phm_force_dpm_levels(eventmgr->hwmgr, AMD_DPM_FORCED_LEVEL_AUTO);
45
46 return 0;
47}
48
/* eventtasks_generic.c */
/*
 * Re-evaluate the current power state.  A new state carried in
 * @event_data (if any) becomes the hardware manager's request; the
 * power-state manager then adjusts either dynamically or statically,
 * depending on the DynamicPatchPowerState platform cap.  Skipped
 * entirely while HW access is blocked.
 */
int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	struct pp_hwmgr *hwmgr;

	if (pem_is_hw_access_blocked(eventmgr))
		return 0;

	hwmgr = eventmgr->hwmgr;
	if (event_data->pnew_power_state != NULL)
		hwmgr->request_ps = event_data->pnew_power_state;

	if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		psm_adjust_power_state_dynamic(eventmgr, event_data->skip_state_adjust_rules);
	else
		psm_adjust_power_state_static(eventmgr, event_data->skip_state_adjust_rules);

	return 0;
}
68
/* Hand the ASIC power-down request to the hardware manager. */
int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return phm_power_down_asic(eventmgr->hwmgr);
}

/* Program the boot state, but only if a valid state id was filled in. */
int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
		return psm_set_states(eventmgr, &(event_data->requested_state_id));

	return 0;
}

/* Placeholder: boot-state reset not implemented yet. */
int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: clock update for a new power state not implemented yet. */
int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: system shutdown handling not implemented yet. */
int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: interrupt registration not implemented yet. */
int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Detach all interrupt callbacks owned by the event manager. */
int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return pem_unregister_interrupts(eventmgr);
}
110
111int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
112{
113 int result;
114
115 result = psm_get_state_by_classification(eventmgr,
116 PP_StateClassificationFlag_Boot,
117 &(event_data->requested_state_id)
118 );
119
120 if (0 == result)
121 pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
122 else
123 pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
124
125 return result;
126}
127
/* Enable dynamic power-state management in the hardware manager. */
int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return phm_enable_dynamic_state_management(eventmgr->hwmgr);
}

/* Placeholder: disabling dynamic state management not implemented yet. */
int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Enable clock and power gating features in the hardware manager. */
int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return phm_enable_clock_power_gatings(eventmgr->hwmgr);
}

/* Power down the UVD block via the hardware manager. */
int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return phm_powerdown_uvd(eventmgr->hwmgr);
}

/*
 * NOTE(review): despite the "_vce_" name this task power-gates both UVD
 * and VCE — confirm the UVD gating here is intentional and not a
 * duplicate of the uvd task above.
 */
int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	phm_powergate_uvd(eventmgr->hwmgr, true);
	phm_powergate_vce(eventmgr->hwmgr, true);
	return 0;
}

/* Placeholder: disabling clock/power gating not implemented yet. */
int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: ASIC block usage accounting (start) not implemented yet. */
int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: ASIC block usage accounting (stop) not implemented yet. */
int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}
173
/* One-time ASIC setup through the hardware manager. */
int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return phm_setup_asic(eventmgr->hwmgr);
}

/* Placeholder: ASIC cleanup not implemented yet. */
int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: storing the DAL display configuration not implemented yet. */
int pem_task_store_dal_configuration(struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config)
{
	/* TODO */
	return 0;
	/*phm_store_dal_configuration_data(eventmgr->hwmgr, display_config) */
}

/*
 * Tell the hardware manager the display configuration changed
 * (skipped while HW access is blocked).
 */
int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	if (pem_is_hw_access_blocked(eventmgr))
		return 0;

	return phm_display_configuration_changed(eventmgr->hwmgr);
}

/* Pre-change notification hook; intentionally a no-op for now. */
int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return 0;
}

/* Notify the SMC of the display config after a power-state adjustment. */
int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	if (pem_is_hw_access_blocked(eventmgr))
		return 0;

	return phm_notify_smc_display_config_after_ps_adjustment(eventmgr->hwmgr);
}

/* Block further power-state adjustments (cleared by the unblock task). */
int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	eventmgr->block_adjust_power_state = true;
	/* to do PHM_ResetIPSCounter(pEventMgr->pHwMgr);*/
	return 0;
}

/* Re-allow power-state adjustments. */
int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	eventmgr->block_adjust_power_state = false;
	return 0;
}

/* Placeholder: power-state change notification not implemented yet. */
int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: blocking HW access not implemented yet. */
int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: unblocking HW access not implemented yet. */
int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: display PHY access reset not implemented yet. */
int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Forward the CPU power-state request to the hardware manager. */
int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return phm_set_cpu_power_state(eventmgr->hwmgr);
}
254
/*powersaving*/

/* Placeholder: power-source selection not implemented yet. */
int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: power-source HW notification not implemented yet. */
int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: querying the power-saving state not implemented yet. */
int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: resetting the power-saving state not implemented yet. */
int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: applying the power-saving state not implemented yet. */
int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: screen-on handling not implemented yet. */
int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: screen-off handling not implemented yet. */
int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: voltage-island power gating enable not implemented yet. */
int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: voltage-island power gating disable not implemented yet. */
int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: CGPG enable not implemented yet. */
int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: CGPG disable not implemented yet. */
int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: clock power gating enable not implemented yet. */
int pem_task_enable_clock_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}


/* Placeholder: GFX clock gating enable not implemented yet. */
int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: GFX clock gating disable not implemented yet. */
int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}
341
342
/* performance */
/* Apply the requested performance state when a valid state id is present. */
int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
		return psm_set_states(eventmgr, &(event_data->requested_state_id));

	return 0;
}

/* Placeholder: conditional 3D performance forcing not implemented yet. */
int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}

/* Placeholder: stutter-mode enablement not implemented yet. */
int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	/* TODO */
	return 0;
}
363
364int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
365{
366 int result;
367
368 if (eventmgr->features[PP_Feature_PowerPlay].supported &&
369 !(eventmgr->features[PP_Feature_PowerPlay].enabled))
370 result = psm_get_state_by_classification(eventmgr,
371 PP_StateClassificationFlag_Boot,
372 &(event_data->requested_state_id));
373 else if (eventmgr->features[PP_Feature_User2DPerformance].enabled)
374 result = psm_get_state_by_classification(eventmgr,
375 PP_StateClassificationFlag_User2DPerformance,
376 &(event_data->requested_state_id));
377 else
378 result = psm_get_ui_state(eventmgr, PP_StateUILabel_Performance,
379 &(event_data->requested_state_id));
380
381 if (0 == result)
382 pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
383 else
384 pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
385
386 return result;
387}
388
389int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
390{
391 struct pp_power_state *state;
392 int table_entries;
393 struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
394 int i;
395
396 table_entries = hwmgr->num_ps;
397 state = hwmgr->ps;
398
399restart_search:
400 for (i = 0; i < table_entries; i++) {
401 if (state->classification.ui_label & event_data->requested_ui_label) {
402 event_data->pnew_power_state = state;
403 return 0;
404 }
405 state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
406 }
407
408 switch (event_data->requested_ui_label) {
409 case PP_StateUILabel_Battery:
410 case PP_StateUILabel_Balanced:
411 event_data->requested_ui_label = PP_StateUILabel_Performance;
412 goto restart_search;
413 default:
414 break;
415 }
416 return -1;
417}
418
419int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
420{
421 struct PP_TemperatureRange range;
422
423 range.max = TEMP_RANGE_MAX;
424 range.min = TEMP_RANGE_MIN;
425
426 if (eventmgr == NULL || eventmgr->platform_descriptor == NULL)
427 return -EINVAL;
428
429 if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ThermalController))
430 return phm_start_thermal_controller(eventmgr->hwmgr, &range);
431
432 return 0;
433}
434
/* Stop the thermal controller; counterpart of the initialize task above. */
int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
	return phm_stop_thermal_controller(eventmgr->hwmgr);
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
new file mode 100644
index 000000000000..6c6297e3b598
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _EVENT_TASKS_H_
25#define _EVENT_TASKS_H_
26#include "eventmgr.h"
27
28struct amd_display_configuration;
29
30/* eventtasks_generic.c */
31int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
32int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
33int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
34int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
35int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
36int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
37int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
38int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
39int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
40int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
41int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
42int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
43int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
44int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
45int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
46int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
47int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
48int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
49int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
50int pem_task_store_dal_configuration (struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config);
51int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
52int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
53int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
54int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
55int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
56int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
57int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
58int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
59int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
60int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
61/*powersaving*/
62
63int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
64int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
65int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
66int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
67int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
68int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
69int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
70int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
71int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
72int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
73int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
74int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
75int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
76int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
77
78/* performance */
79int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
80int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
81int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
82int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
83int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
84/*thermal */
85int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
86int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
87
88#endif /* _EVENT_TASKS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
new file mode 100644
index 000000000000..a46225c0fc01
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "psm.h"
24
25int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id)
26{
27 struct pp_power_state *state;
28 int table_entries;
29 struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
30 int i;
31
32 table_entries = hwmgr->num_ps;
33 state = hwmgr->ps;
34
35 for (i = 0; i < table_entries; i++) {
36 if (state->classification.ui_label & ui_label) {
37 *state_id = state->id;
38 return 0;
39 }
40 state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
41 }
42 return -1;
43}
44
45int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id)
46{
47 struct pp_power_state *state;
48 int table_entries;
49 struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
50 int i;
51
52 table_entries = hwmgr->num_ps;
53 state = hwmgr->ps;
54
55 for (i = 0; i < table_entries; i++) {
56 if (state->classification.flags & flag) {
57 *state_id = state->id;
58 return 0;
59 }
60 state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
61 }
62 return -1;
63}
64
65int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
66{
67 struct pp_power_state *state;
68 int table_entries;
69 struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
70 int i;
71
72 table_entries = hwmgr->num_ps;
73 state = hwmgr->ps;
74
75 for (i = 0; i < table_entries; i++) {
76 if (state->id == *state_id) {
77 hwmgr->request_ps = state;
78 return 0;
79 }
80 state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
81 }
82 return -1;
83}
84
/*
 * psm_adjust_power_state_dynamic - commit the pending requested power state
 * (hwmgr->request_ps) to hardware if it differs from the current one, or if
 * the SMC needs a refresh for the display configuration.
 *
 * @skip: when true, do nothing and report success.
 * Returns 0 (also when there is no pending request).
 */
int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
{

	struct pp_power_state *pcurrent;
	struct pp_power_state *requested;
	struct pp_hwmgr *hwmgr;
	bool equal;

	if (skip)
		return 0;

	hwmgr = eventmgr->hwmgr;
	pcurrent = hwmgr->current_ps;
	requested = hwmgr->request_ps;

	if (requested == NULL)
		return 0;

	/* Treat an unknown current state, or a failed comparison, as "not equal"
	 * so the requested state is always applied in those cases. */
	if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
		equal = false;

	if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
		/* NOTE(review): if pcurrent is NULL here, &pcurrent->hardware below
		 * is a NULL-based pointer handed to the phm_* calls — presumably
		 * current_ps is always populated before this runs; confirm. */
		phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
		phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
		hwmgr->current_ps = requested;
	}
	return 0;
}
113
114int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip)
115{
116 return 0;
117}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h
new file mode 100644
index 000000000000..fbdff3e02aa3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "eventmgr.h"
24#include "eventinit.h"
25#include "eventmanagement.h"
26#include "eventmanager.h"
27#include "power_state.h"
28#include "hardwaremanager.h"
29
30int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id);
31
32int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id);
33
34int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id);
35
36int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip);
37
38int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
new file mode 100644
index 000000000000..b664e34dbcc0
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -0,0 +1,15 @@
1#
2# Makefile for the 'hw manager' sub-component of powerplay.
3# It provides the hardware management services for the driver.
4
5HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
6 hardwaremanager.o pp_acpi.o cz_hwmgr.o \
7 cz_clockpowergating.o \
8 tonga_processpptables.o ppatomctrl.o \
9 tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
10 fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
11 fiji_clockpowergating.o fiji_thermal.o
12
13AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
14
15AMD_POWERPLAY_FILES += $(AMD_PP_HWMGR)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
new file mode 100644
index 000000000000..ad7700822a1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -0,0 +1,252 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "cz_clockpowergating.h"
26#include "cz_ppsmc.h"
27
28/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
29 0 GFX0L (3:0), (27:24),
30 1 GFX0H (7:4), (31:28),
31 2 GFX1L (3:0), (19:16),
32 3 GFX1H (7:4), (23:20),
33 4 DDIL (3:0), (11: 8),
34 5 DDIH (7:4), (15:12),
35 6 DDI2L (3:0), ( 3: 0),
36 7 DDI2H (7:4), ( 7: 4),
37*/
38#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
39#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
40
41
42int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
43{
44 int ret = 0;
45
46 switch (block) {
47 case PHM_AsicBlock_UVD_MVC:
48 case PHM_AsicBlock_UVD:
49 case PHM_AsicBlock_UVD_HD:
50 case PHM_AsicBlock_UVD_SD:
51 if (gating == PHM_ClockGateSetting_StaticOff)
52 ret = cz_dpm_powerdown_uvd(hwmgr);
53 else
54 ret = cz_dpm_powerup_uvd(hwmgr);
55 break;
56 case PHM_AsicBlock_GFX:
57 default:
58 break;
59 }
60
61 return ret;
62}
63
64
65bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
66{
67 return true;
68}
69
70
/* GFX power gating toggle is a no-op on this ASIC; always succeeds. */
int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{
	return 0;
}
75
/* Not implemented yet (TODO); accepted unconditionally. */
int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
{
	return 0;
}
81
/* Not implemented yet (TODO); accepted unconditionally. */
int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
{
	return 0;
}
87
/* Not implemented yet (TODO); accepted unconditionally. */
int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
{
	return 0;
}
93
/* Not implemented yet (TODO); accepted unconditionally. */
int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
{
	return 0;
}
99
/* Not implemented yet (TODO); accepted unconditionally. */
int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
{
	return 0;
}
105
/* Table-function stub: UVD power gating needs no setup on this ASIC. */
static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
{
	return 0;
}
110
/* Table-function stub: VCE power gating needs no setup on this ASIC. */
static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
{
	return 0;
}
115
116int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
117{
118 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
119 uint32_t dpm_features = 0;
120
121 if (enable &&
122 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
123 PHM_PlatformCaps_UVDDPM)) {
124 cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
125 dpm_features |= UVD_DPM_MASK;
126 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
127 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
128 } else {
129 dpm_features |= UVD_DPM_MASK;
130 cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
131 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
132 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
133 }
134 return 0;
135}
136
137int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
138{
139 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
140 uint32_t dpm_features = 0;
141
142 if (enable && phm_cap_enabled(
143 hwmgr->platform_descriptor.platformCaps,
144 PHM_PlatformCaps_VCEDPM)) {
145 cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
146 dpm_features |= VCE_DPM_MASK;
147 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
148 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
149 } else {
150 dpm_features |= VCE_DPM_MASK;
151 cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
152 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
153 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
154 }
155
156 return 0;
157}
158
159
/*
 * cz_dpm_powergate_uvd - gate (power off) or ungate the UVD block.
 * No-op if the block is already in the requested state.
 *
 * Gate path: ungate clocks, assert power gating, update UVD DPM, then
 * power the block down. Ungate path reverses the order: power up first,
 * then flip the gating states and refresh DPM.
 */
int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

	if (cz_hwmgr->uvd_power_gated == bgate)
		return 0;

	cz_hwmgr->uvd_power_gated = bgate;

	if (bgate) {
		cgs_set_clockgating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_CG_STATE_UNGATE);
		cgs_set_powergating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_PG_STATE_GATE);
		cz_dpm_update_uvd_dpm(hwmgr, true);
		cz_dpm_powerdown_uvd(hwmgr);
	} else {
		cz_dpm_powerup_uvd(hwmgr);
		/* NOTE(review): a PG_* constant is passed to the CLOCK-gating
		 * call and a CG_* constant to the POWER-gating call below —
		 * the enums look swapped relative to the gate path above;
		 * confirm against the cgs interface before changing. */
		cgs_set_clockgating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_PG_STATE_GATE);
		cgs_set_powergating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_CG_STATE_UNGATE);
		cz_dpm_update_uvd_dpm(hwmgr, false);
	}

	return 0;
}
191
/*
 * cz_dpm_powergate_vce - gate or ungate the VCE block.
 *
 * With the VCEPowerGating cap: transitions only when the cached gating
 * state actually changes. The gate path disables VCE DPM but does not
 * power the block off (see TODO below); the ungate path powers up and
 * re-enables DPM, returning early.
 *
 * Without the cap: simply refresh and enable VCE DPM.
 */
int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_VCEPowerGating)) {
		if (cz_hwmgr->vce_power_gated != bgate) {
			if (bgate) {
				cgs_set_clockgating_state(
						hwmgr->device,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_CG_STATE_UNGATE);
				cgs_set_powergating_state(
						hwmgr->device,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_GATE);
				cz_enable_disable_vce_dpm(hwmgr, false);
				/* TODO: to figure out why vce can't be poweroff*/
				cz_hwmgr->vce_power_gated = true;
			} else {
				cz_dpm_powerup_vce(hwmgr);
				cz_hwmgr->vce_power_gated = false;
				/* NOTE(review): PG_* passed to clockgating and
				 * CG_* to powergating here — looks swapped
				 * versus the gate path; confirm before fixing. */
				cgs_set_clockgating_state(
						hwmgr->device,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_GATE);
				cgs_set_powergating_state(
						hwmgr->device,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_CG_STATE_UNGATE);
				cz_dpm_update_vce_dpm(hwmgr);
				cz_enable_disable_vce_dpm(hwmgr, true);
				return 0;
			}
		}
	} else {
		/* no power gating support: just keep VCE DPM running */
		cz_dpm_update_vce_dpm(hwmgr);
		cz_enable_disable_vce_dpm(hwmgr, true);
		return 0;
	}

	/* reached only on the gate path (or no-change); keep DPM current
	 * while the block remains ungated */
	if (!cz_hwmgr->vce_power_gated)
		cz_dpm_update_vce_dpm(hwmgr);

	return 0;
}
238
239
/* Master table run when clock/power gatings are enabled: each entry pairs a
 * "do we want this?" predicate with the table function that performs it. */
static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
	/*we don't need an exit table here, because there is only D3 cold on Kv*/
	{ phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
	{ phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
	/* to do { NULL, cz_tf_xdma_power_gating_enable }, */
	{ NULL, NULL }
};

/* Header exposed to the phm framework; storage size 0, no special flags. */
struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
	0,
	PHM_MasterTableFlag_None,
	cz_enable_clock_power_gatings_list
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
new file mode 100644
index 000000000000..bbbc0571320e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _CZ_CLOCK_POWER_GATING_H_
25#define _CZ_CLOCK_POWER_GATING_H_
26
27#include "cz_hwmgr.h"
28#include "pp_asicblocks.h"
29
30extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
31extern struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
32extern struct phm_master_table_header cz_phm_disable_clock_power_gatings_master;
33extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
34extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
35extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
36extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
37#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
new file mode 100644
index 000000000000..0874ab42ee95
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -0,0 +1,1737 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include "atom-types.h"
27#include "atombios.h"
28#include "processpptables.h"
29#include "pp_debug.h"
30#include "cgs_common.h"
31#include "smu/smu_8_0_d.h"
32#include "smu8_fusion.h"
33#include "smu/smu_8_0_sh_mask.h"
34#include "smumgr.h"
35#include "hwmgr.h"
36#include "hardwaremanager.h"
37#include "cz_ppsmc.h"
38#include "cz_hwmgr.h"
39#include "power_state.h"
40#include "cz_clockpowergating.h"
41#include "pp_debug.h"
42
43#define ixSMUSVI_NB_CURRENTVID 0xD8230044
44#define CURRENT_NB_VID_MASK 0xff000000
45#define CURRENT_NB_VID__SHIFT 24
46#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
47#define CURRENT_GFX_VID_MASK 0xff000000
48#define CURRENT_GFX_VID__SHIFT 24
49
50static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic;
51
52static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps)
53{
54 if (PhwCz_Magic != hw_ps->magic)
55 return NULL;
56
57 return (struct cz_power_state *)hw_ps;
58}
59
60static const struct cz_power_state *cast_const_PhwCzPowerState(
61 const struct pp_hw_power_state *hw_ps)
62{
63 if (PhwCz_Magic != hw_ps->magic)
64 return NULL;
65
66 return (struct cz_power_state *)hw_ps;
67}
68
69uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
70 uint32_t clock, uint32_t msg)
71{
72 int i = 0;
73 struct phm_vce_clock_voltage_dependency_table *ptable =
74 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
75
76 switch (msg) {
77 case PPSMC_MSG_SetEclkSoftMin:
78 case PPSMC_MSG_SetEclkHardMin:
79 for (i = 0; i < (int)ptable->count; i++) {
80 if (clock <= ptable->entries[i].ecclk)
81 break;
82 }
83 break;
84
85 case PPSMC_MSG_SetEclkSoftMax:
86 case PPSMC_MSG_SetEclkHardMax:
87 for (i = ptable->count - 1; i >= 0; i--) {
88 if (clock >= ptable->entries[i].ecclk)
89 break;
90 }
91 break;
92
93 default:
94 break;
95 }
96
97 return i;
98}
99
100static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
101 uint32_t clock, uint32_t msg)
102{
103 int i = 0;
104 struct phm_clock_voltage_dependency_table *table =
105 hwmgr->dyn_state.vddc_dependency_on_sclk;
106
107 switch (msg) {
108 case PPSMC_MSG_SetSclkSoftMin:
109 case PPSMC_MSG_SetSclkHardMin:
110 for (i = 0; i < (int)table->count; i++) {
111 if (clock <= table->entries[i].clk)
112 break;
113 }
114 break;
115
116 case PPSMC_MSG_SetSclkSoftMax:
117 case PPSMC_MSG_SetSclkHardMax:
118 for (i = table->count - 1; i >= 0; i--) {
119 if (clock >= table->entries[i].clk)
120 break;
121 }
122 break;
123
124 default:
125 break;
126 }
127 return i;
128}
129
130static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
131 uint32_t clock, uint32_t msg)
132{
133 int i = 0;
134 struct phm_uvd_clock_voltage_dependency_table *ptable =
135 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
136
137 switch (msg) {
138 case PPSMC_MSG_SetUvdSoftMin:
139 case PPSMC_MSG_SetUvdHardMin:
140 for (i = 0; i < (int)ptable->count; i++) {
141 if (clock <= ptable->entries[i].vclk)
142 break;
143 }
144 break;
145
146 case PPSMC_MSG_SetUvdSoftMax:
147 case PPSMC_MSG_SetUvdHardMax:
148 for (i = ptable->count - 1; i >= 0; i--) {
149 if (clock >= ptable->entries[i].vclk)
150 break;
151 }
152 break;
153
154 default:
155 break;
156 }
157
158 return i;
159}
160
161static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
162{
163 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
164
165 if (cz_hwmgr->max_sclk_level == 0) {
166 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel);
167 cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr->smumgr) + 1;
168 }
169
170 return cz_hwmgr->max_sclk_level;
171}
172
/*
 * cz_initialize_dpm_defaults - populate the Carrizo hwmgr backend with its
 * default DPM tuning values and set/clear the platform caps this ASIC
 * supports. Always returns 0.
 *
 * The numeric defaults below are hardware tuning constants carried over
 * from the vendor driver; do not change them without validation.
 */
static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
	uint32_t i;

	/* 25% of 256-step range */
	cz_hwmgr->gfx_ramp_step = 256*25/100;

	cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */

	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
		cz_hwmgr->activity_target[i] = CZ_AT_DFLT;

	cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
	cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;

	/* kHz, presumably — TODO confirm unit */
	cz_hwmgr->clock_slow_down_freq = 25000;

	cz_hwmgr->skip_clock_slow_down = 1;

	cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */

	cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */

	cz_hwmgr->voting_rights_clients = 0x00C00033;

	cz_hwmgr->static_screen_threshold = 8;

	cz_hwmgr->ddi_power_gating_disabled = 0;

	cz_hwmgr->bapm_enabled = 1;

	cz_hwmgr->voltage_drop_threshold = 0;

	cz_hwmgr->gfx_power_gating_threshold = 500;

	cz_hwmgr->vce_slow_sclk_threshold = 20000;

	cz_hwmgr->dce_slow_sclk_threshold = 30000;

	cz_hwmgr->disable_driver_thermal_policy = 1;

	cz_hwmgr->disable_nb_ps3_in_battery = 0;

	/* adaptive backlight modulation not used; PPLib handles non-ABM */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ABM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_NonABMSupportInPPLib);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DynamicM3Arbiter);

	cz_hwmgr->override_dynamic_mgpg = 1;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DynamicPatchPowerState);

	cz_hwmgr->thermal_auto_throttling_treshold = 0;

	cz_hwmgr->tdr_clock = 0;

	cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DynamicUVDState);

	/* CC6 (CPU deep sleep interaction) defaults: nothing disabled */
	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
	cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				   PHM_PlatformCaps_DisableVoltageIsland);

	return 0;
}
252
/* Convert an 8-bit SVI voltage index to millivolts: index 0 is 6200 mV and
 * each step drops 25 mV (hwmgr is unused but kept for interface parity). */
static uint32_t cz_convert_8Bit_index_to_voltage(
			struct pp_hwmgr *hwmgr, uint16_t voltage)
{
	const uint32_t base_mv = 6200;
	const uint32_t step_mv = 25;

	return base_mv - (uint32_t)voltage * step_mv;
}
258
259static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
260 struct phm_clock_and_voltage_limits *table)
261{
262 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
263 struct cz_sys_info *sys_info = &cz_hwmgr->sys_info;
264 struct phm_clock_voltage_dependency_table *dep_table =
265 hwmgr->dyn_state.vddc_dependency_on_sclk;
266
267 if (dep_table->count > 0) {
268 table->sclk = dep_table->entries[dep_table->count-1].clk;
269 table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr,
270 (uint16_t)dep_table->entries[dep_table->count-1].v);
271 }
272 table->mclk = sys_info->nbp_memory_clock[0];
273 return 0;
274}
275
276static int cz_init_dynamic_state_adjustment_rule_settings(
277 struct pp_hwmgr *hwmgr,
278 ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
279{
280 uint32_t table_size =
281 sizeof(struct phm_clock_voltage_dependency_table) +
282 (7 * sizeof(struct phm_clock_voltage_dependency_record));
283
284 struct phm_clock_voltage_dependency_table *table_clk_vlt =
285 kzalloc(table_size, GFP_KERNEL);
286
287 if (NULL == table_clk_vlt) {
288 printk(KERN_ERR "[ powerplay ] Can not allocate memory!\n");
289 return -ENOMEM;
290 }
291
292 table_clk_vlt->count = 8;
293 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
294 table_clk_vlt->entries[0].v = 0;
295 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
296 table_clk_vlt->entries[1].v = 1;
297 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
298 table_clk_vlt->entries[2].v = 2;
299 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
300 table_clk_vlt->entries[3].v = 3;
301 table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
302 table_clk_vlt->entries[4].v = 4;
303 table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
304 table_clk_vlt->entries[5].v = 5;
305 table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
306 table_clk_vlt->entries[6].v = 6;
307 table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
308 table_clk_vlt->entries[7].v = 7;
309 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
310
311 return 0;
312}
313
314static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
315{
316 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
317 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
318 uint32_t i;
319 int result = 0;
320 uint8_t frev, crev;
321 uint16_t size;
322
323 info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
324 hwmgr->device,
325 GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
326 &size, &frev, &crev);
327
328 if (crev != 9) {
329 printk(KERN_ERR "[ powerplay ] Unsupported IGP table: %d %d\n", frev, crev);
330 return -EINVAL;
331 }
332
333 if (info == NULL) {
334 printk(KERN_ERR "[ powerplay ] Could not retrieve the Integrated System Info Table!\n");
335 return -EINVAL;
336 }
337
338 cz_hwmgr->sys_info.bootup_uma_clock =
339 le32_to_cpu(info->ulBootUpUMAClock);
340
341 cz_hwmgr->sys_info.bootup_engine_clock =
342 le32_to_cpu(info->ulBootUpEngineClock);
343
344 cz_hwmgr->sys_info.dentist_vco_freq =
345 le32_to_cpu(info->ulDentistVCOFreq);
346
347 cz_hwmgr->sys_info.system_config =
348 le32_to_cpu(info->ulSystemConfig);
349
350 cz_hwmgr->sys_info.bootup_nb_voltage_index =
351 le16_to_cpu(info->usBootUpNBVoltage);
352
353 cz_hwmgr->sys_info.htc_hyst_lmt =
354 (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
355
356 cz_hwmgr->sys_info.htc_tmp_lmt =
357 (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
358
359 if (cz_hwmgr->sys_info.htc_tmp_lmt <=
360 cz_hwmgr->sys_info.htc_hyst_lmt) {
361 printk(KERN_ERR "[ powerplay ] The htcTmpLmt should be larger than htcHystLmt.\n");
362 return -EINVAL;
363 }
364
365 cz_hwmgr->sys_info.nb_dpm_enable =
366 cz_hwmgr->enable_nb_ps_policy &&
367 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
368
369 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
370 if (i < CZ_NUM_NBPMEMORYCLOCK) {
371 cz_hwmgr->sys_info.nbp_memory_clock[i] =
372 le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
373 }
374 cz_hwmgr->sys_info.nbp_n_clock[i] =
375 le32_to_cpu(info->ulNbpStateNClkFreq[i]);
376 }
377
378 for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
379 cz_hwmgr->sys_info.display_clock[i] =
380 le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
381 }
382
383 /* Here use 4 levels, make sure not exceed */
384 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
385 cz_hwmgr->sys_info.nbp_voltage_index[i] =
386 le16_to_cpu(info->usNBPStateVoltage[i]);
387 }
388
389 if (!cz_hwmgr->sys_info.nb_dpm_enable) {
390 for (i = 1; i < CZ_NUM_NBPSTATES; i++) {
391 if (i < CZ_NUM_NBPMEMORYCLOCK) {
392 cz_hwmgr->sys_info.nbp_memory_clock[i] =
393 cz_hwmgr->sys_info.nbp_memory_clock[0];
394 }
395 cz_hwmgr->sys_info.nbp_n_clock[i] =
396 cz_hwmgr->sys_info.nbp_n_clock[0];
397 cz_hwmgr->sys_info.nbp_voltage_index[i] =
398 cz_hwmgr->sys_info.nbp_voltage_index[0];
399 }
400 }
401
402 if (le32_to_cpu(info->ulGPUCapInfo) &
403 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
404 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
405 PHM_PlatformCaps_EnableDFSBypass);
406 }
407
408 cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber;
409
410 cz_construct_max_power_limits_table (hwmgr,
411 &hwmgr->dyn_state.max_clock_voltage_on_ac);
412
413 cz_init_dynamic_state_adjustment_rule_settings(hwmgr,
414 &info->sDISPCLK_Voltage[0]);
415
416 return result;
417}
418
419static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
420{
421 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
422
423 cz_hwmgr->boot_power_level.engineClock =
424 cz_hwmgr->sys_info.bootup_engine_clock;
425
426 cz_hwmgr->boot_power_level.vddcIndex =
427 (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
428
429 cz_hwmgr->boot_power_level.dsDividerIndex = 0;
430
431 cz_hwmgr->boot_power_level.ssDividerIndex = 0;
432
433 cz_hwmgr->boot_power_level.allowGnbSlow = 1;
434
435 cz_hwmgr->boot_power_level.forceNBPstate = 0;
436
437 cz_hwmgr->boot_power_level.hysteresis_up = 0;
438
439 cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
440
441 cz_hwmgr->boot_power_level.display_wm = 0;
442
443 cz_hwmgr->boot_power_level.vce_wm = 0;
444
445 return 0;
446}
447
/*
 * Appears to be a placeholder task: nothing is reset here; the entry
 * presumably keeps the setup-asic sequence shape — confirm before
 * removing.  Always returns 0.
 */
static int cz_tf_reset_active_process_mask(struct pp_hwmgr *hwmgr, void *input,
					void *output, void *storage, int result)
{
	return 0;
}
453
/*
 * cz_tf_upload_pptable_to_smu - patch the SMU8 fusion clock table with
 * the driver's clock/voltage dependency tables and push it back to the
 * SMU.
 *
 * For each of the CZ_MAX_HARDWARE_POWERLEVELS levels the sclk, acp, uvd
 * (vclk/dclk) and vce breakdown tables get their voltage id, frequency
 * and PLL post-divider filled in; entries beyond a dependency table's
 * count are zeroed.  No-op when need_pp_table_upload is clear.
 *
 * Returns 0 on success, -EINVAL when the SMU table cannot be fetched or
 * a dependency table exceeds the hardware level limit, otherwise the
 * upload result.
 */
static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
				       void *output, void *storage, int result)
{
	struct SMU8_Fusion_ClkTable *clock_table;
	int ret;
	uint32_t i;
	void *table = NULL;
	pp_atomctrl_clock_dividers_kong dividers;

	struct phm_clock_voltage_dependency_table *vddc_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *vdd_gfx_table =
		hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
	struct phm_acp_clock_voltage_dependency_table *acp_table =
		hwmgr->dyn_state.acp_clock_voltage_dependency_table;
	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	if (!hwmgr->need_pp_table_upload)
		return 0;

	/* fetch the current table image from the SMU so we can patch it */
	ret = smum_download_powerplay_table(hwmgr->smumgr, &table);

	PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
			    "Fail to get clock table from SMU!", return -EINVAL;);

	clock_table = (struct SMU8_Fusion_ClkTable *)table;

	/* patch clock table */
	PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);

	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {

		/* vddc_sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		/* vddgfx_sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;

		/* acp breakdown */
		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
			(i < acp_table->count) ? acp_table->entries[i].acpclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;


		/* uvd breakdown */
		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		/* dclk shares the uvd dependency table's voltage ids */
		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		/* vce breakdown */
		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;


		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

	}
	ret = smum_upload_powerplay_table(hwmgr->smumgr);

	return ret;
}
573
574static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input,
575 void *output, void *storage, int result)
576{
577 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
578 struct phm_clock_voltage_dependency_table *table =
579 hwmgr->dyn_state.vddc_dependency_on_sclk;
580 unsigned long clock = 0, level;
581
582 if (NULL == table || table->count <= 0)
583 return -EINVAL;
584
585 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
586 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
587
588 level = cz_get_max_sclk_level(hwmgr) - 1;
589
590 if (level < table->count)
591 clock = table->entries[level].clk;
592 else
593 clock = table->entries[table->count - 1].clk;
594
595 cz_hwmgr->sclk_dpm.soft_max_clk = clock;
596 cz_hwmgr->sclk_dpm.hard_max_clk = clock;
597
598 return 0;
599}
600
601static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
602 void *output, void *storage, int result)
603{
604 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
605 struct phm_uvd_clock_voltage_dependency_table *table =
606 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
607 unsigned long clock = 0, level;
608
609 if (NULL == table || table->count <= 0)
610 return -EINVAL;
611
612 cz_hwmgr->uvd_dpm.soft_min_clk = 0;
613 cz_hwmgr->uvd_dpm.hard_min_clk = 0;
614
615 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxUvdLevel);
616 level = smum_get_argument(hwmgr->smumgr);
617
618 if (level < table->count)
619 clock = table->entries[level].vclk;
620 else
621 clock = table->entries[table->count - 1].vclk;
622
623 cz_hwmgr->uvd_dpm.soft_max_clk = clock;
624 cz_hwmgr->uvd_dpm.hard_max_clk = clock;
625
626 return 0;
627}
628
629static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
630 void *output, void *storage, int result)
631{
632 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
633 struct phm_vce_clock_voltage_dependency_table *table =
634 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
635 unsigned long clock = 0, level;
636
637 if (NULL == table || table->count <= 0)
638 return -EINVAL;
639
640 cz_hwmgr->vce_dpm.soft_min_clk = 0;
641 cz_hwmgr->vce_dpm.hard_min_clk = 0;
642
643 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxEclkLevel);
644 level = smum_get_argument(hwmgr->smumgr);
645
646 if (level < table->count)
647 clock = table->entries[level].ecclk;
648 else
649 clock = table->entries[table->count - 1].ecclk;
650
651 cz_hwmgr->vce_dpm.soft_max_clk = clock;
652 cz_hwmgr->vce_dpm.hard_max_clk = clock;
653
654 return 0;
655}
656
657static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
658 void *output, void *storage, int result)
659{
660 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
661 struct phm_acp_clock_voltage_dependency_table *table =
662 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
663 unsigned long clock = 0, level;
664
665 if (NULL == table || table->count <= 0)
666 return -EINVAL;
667
668 cz_hwmgr->acp_dpm.soft_min_clk = 0;
669 cz_hwmgr->acp_dpm.hard_min_clk = 0;
670
671 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxAclkLevel);
672 level = smum_get_argument(hwmgr->smumgr);
673
674 if (level < table->count)
675 clock = table->entries[level].acpclk;
676 else
677 clock = table->entries[table->count - 1].acpclk;
678
679 cz_hwmgr->acp_dpm.soft_max_clk = clock;
680 cz_hwmgr->acp_dpm.hard_max_clk = clock;
681 return 0;
682}
683
684static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
685 void *output, void *storage, int result)
686{
687 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
688
689 cz_hwmgr->uvd_power_gated = false;
690 cz_hwmgr->vce_power_gated = false;
691 cz_hwmgr->samu_power_gated = false;
692 cz_hwmgr->acp_power_gated = false;
693 cz_hwmgr->pgacpinit = true;
694
695 return 0;
696}
697
698static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input,
699 void *output, void *storage, int result)
700{
701 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
702
703 cz_hwmgr->low_sclk_interrupt_threshold = 0;
704
705 return 0;
706}
707static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
708 void *input, void *output,
709 void *storage, int result)
710{
711 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
712 struct phm_clock_voltage_dependency_table *table =
713 hwmgr->dyn_state.vddc_dependency_on_sclk;
714
715 unsigned long clock = 0;
716 unsigned long level;
717 unsigned long stable_pstate_sclk;
718 struct PP_Clocks clocks;
719 unsigned long percentage;
720
721 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
722 level = cz_get_max_sclk_level(hwmgr) - 1;
723
724 if (level < table->count)
725 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk;
726 else
727 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
728
729 /*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/
730 clock = clocks.engineClock;
731
732 if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
733 cz_hwmgr->sclk_dpm.hard_min_clk = clock;
734
735 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
736 PPSMC_MSG_SetSclkHardMin,
737 cz_get_sclk_level(hwmgr,
738 cz_hwmgr->sclk_dpm.hard_min_clk,
739 PPSMC_MSG_SetSclkHardMin));
740 }
741
742 clock = cz_hwmgr->sclk_dpm.soft_min_clk;
743
744 /* update minimum clocks for Stable P-State feature */
745 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
746 PHM_PlatformCaps_StablePState)) {
747 percentage = 75;
748 /*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */
749 stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
750 percentage) / 100;
751
752 if (clock < stable_pstate_sclk)
753 clock = stable_pstate_sclk;
754 } else {
755 if (clock < hwmgr->gfx_arbiter.sclk)
756 clock = hwmgr->gfx_arbiter.sclk;
757 }
758
759 if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
760 cz_hwmgr->sclk_dpm.soft_min_clk = clock;
761 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
762 PPSMC_MSG_SetSclkSoftMin,
763 cz_get_sclk_level(hwmgr,
764 cz_hwmgr->sclk_dpm.soft_min_clk,
765 PPSMC_MSG_SetSclkSoftMin));
766 }
767
768 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
769 PHM_PlatformCaps_StablePState) &&
770 cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
771 cz_hwmgr->sclk_dpm.soft_max_clk = clock;
772 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
773 PPSMC_MSG_SetSclkSoftMax,
774 cz_get_sclk_level(hwmgr,
775 cz_hwmgr->sclk_dpm.soft_max_clk,
776 PPSMC_MSG_SetSclkSoftMax));
777 }
778
779 return 0;
780}
781
782static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
783 void *input, void *output,
784 void *storage, int result)
785{
786 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
787 PHM_PlatformCaps_SclkDeepSleep)) {
788 uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
789 if (clks == 0)
790 clks = CZ_MIN_DEEP_SLEEP_SCLK;
791
792 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
793
794 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
795 PPSMC_MSG_SetMinDeepSleepSclk,
796 clks);
797 }
798
799 return 0;
800}
801
802static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
803 void *input, void *output,
804 void *storage, int result)
805{
806 struct cz_hwmgr *cz_hwmgr =
807 (struct cz_hwmgr *)(hwmgr->backend);
808
809 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
810 PPSMC_MSG_SetWatermarkFrequency,
811 cz_hwmgr->sclk_dpm.soft_max_clk);
812
813 return 0;
814}
815
/*
 * Appears to be a placeholder: no level mask is programmed for Carrizo.
 * Kept in the set-power-state task list; always returns 0.
 */
static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr,
					void *input, void *output,
					void *storage, int result)
{
	return 0;
}
822
823
824static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
825 void *input, void *output,
826 void *storage, int result)
827{
828 int ret = 0;
829
830 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
831 unsigned long dpm_features = 0;
832
833 if (!cz_hwmgr->is_nb_dpm_enabled) {
834 PP_DBG_LOG("enabling ALL SMU features.\n");
835 dpm_features |= NB_DPM_MASK;
836 ret = smum_send_msg_to_smc_with_parameter(
837 hwmgr->smumgr,
838 PPSMC_MSG_EnableAllSmuFeatures,
839 dpm_features);
840 if (ret == 0)
841 cz_hwmgr->is_nb_dpm_enabled = true;
842 }
843
844 return ret;
845}
846
847static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
848{
849 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
850
851 if (hw_data->is_nb_dpm_enabled) {
852 if (enable) {
853 PP_DBG_LOG("enable Low Memory PState.\n");
854
855 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
856 PPSMC_MSG_EnableLowMemoryPstate,
857 (lock ? 1 : 0));
858 } else {
859 PP_DBG_LOG("disable Low Memory PState.\n");
860
861 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
862 PPSMC_MSG_DisableLowMemoryPstate,
863 (lock ? 1 : 0));
864 }
865 }
866
867 return 0;
868}
869
870static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
871 void *input, void *output,
872 void *storage, int result)
873{
874 bool disable_switch;
875 bool enable_low_mem_state;
876 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
877 const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
878 const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state);
879
880 if (hw_data->sys_info.nb_dpm_enable) {
881 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
882 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
883
884 if (pnew_state->action == FORCE_HIGH)
885 cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
886 else if(pnew_state->action == CANCEL_FORCE_HIGH)
887 cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
888 else
889 cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
890 }
891 return 0;
892}
893
/* Tasks making up the set-power-state sequence (NULL,NULL terminates). */
static struct phm_master_table_item cz_set_power_state_list[] = {
	{NULL, cz_tf_update_sclk_limit},
	{NULL, cz_tf_set_deep_sleep_sclk_threshold},
	{NULL, cz_tf_set_watermark_threshold},
	{NULL, cz_tf_set_enabled_levels},
	{NULL, cz_tf_enable_nb_dpm},
	{NULL, cz_tf_update_low_mem_pstate},
	{NULL, NULL}
};

/* Master table wrapping the set-power-state task list (no flags). */
static struct phm_master_table_header cz_set_power_state_master = {
	0,
	PHM_MasterTableFlag_None,
	cz_set_power_state_list
};
909
/* Tasks making up the ASIC setup sequence (NULL,NULL terminates). */
static struct phm_master_table_item cz_setup_asic_list[] = {
	{NULL, cz_tf_reset_active_process_mask},
	{NULL, cz_tf_upload_pptable_to_smu},
	{NULL, cz_tf_init_sclk_limit},
	{NULL, cz_tf_init_uvd_limit},
	{NULL, cz_tf_init_vce_limit},
	{NULL, cz_tf_init_acp_limit},
	{NULL, cz_tf_init_power_gate_state},
	{NULL, cz_tf_init_sclk_threshold},
	{NULL, NULL}
};

/* Master table wrapping the ASIC setup task list (no flags). */
static struct phm_master_table_header cz_setup_asic_master = {
	0,
	PHM_MasterTableFlag_None,
	cz_setup_asic_list
};
927
928static int cz_tf_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr,
929 void *input, void *output,
930 void *storage, int result)
931{
932 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
933 hw_data->disp_clk_bypass_pending = false;
934 hw_data->disp_clk_bypass = false;
935
936 return 0;
937}
938
939static int cz_tf_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr,
940 void *input, void *output,
941 void *storage, int result)
942{
943 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
944 hw_data->is_nb_dpm_enabled = false;
945
946 return 0;
947}
948
949static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
950 void *input, void *output,
951 void *storage, int result)
952{
953 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
954
955 hw_data->cc6_settings.cc6_setting_changed = false;
956 hw_data->cc6_settings.cpu_pstate_separation_time = 0;
957 hw_data->cc6_settings.cpu_cc6_disable = false;
958 hw_data->cc6_settings.cpu_pstate_disable = false;
959
960 return 0;
961}
962
/* Tasks making up the ASIC power-down sequence (NULL,NULL terminates). */
static struct phm_master_table_item cz_power_down_asic_list[] = {
	{NULL, cz_tf_power_up_display_clock_sys_pll},
	{NULL, cz_tf_clear_nb_dpm_flag},
	{NULL, cz_tf_reset_cc6_data},
	{NULL, NULL}
};

/* Master table wrapping the power-down task list (no flags). */
static struct phm_master_table_header cz_power_down_asic_master = {
	0,
	PHM_MasterTableFlag_None,
	cz_power_down_asic_list
};
975
/*
 * Program SMC voting-rights-clients register 0 with the driver default
 * mask via the PHMCZ_WRITE_SMC_REGISTER helper.  Always returns 0.
 */
static int cz_tf_program_voting_clients(struct pp_hwmgr *hwmgr, void *input,
				void *output, void *storage, int result)
{
	PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
				PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
	return 0;
}
983
984static int cz_tf_start_dpm(struct pp_hwmgr *hwmgr, void *input, void *output,
985 void *storage, int result)
986{
987 int res = 0xff;
988 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
989 unsigned long dpm_features = 0;
990
991 cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
992 dpm_features |= SCLK_DPM_MASK;
993
994 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
995 PPSMC_MSG_EnableAllSmuFeatures,
996 dpm_features);
997
998 return res;
999}
1000
1001static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
1002 void *output, void *storage, int result)
1003{
1004 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1005
1006 cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
1007 cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
1008
1009 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1010 PPSMC_MSG_SetSclkSoftMin,
1011 cz_get_sclk_level(hwmgr,
1012 cz_hwmgr->sclk_dpm.soft_min_clk,
1013 PPSMC_MSG_SetSclkSoftMin));
1014
1015 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1016 PPSMC_MSG_SetSclkSoftMax,
1017 cz_get_sclk_level(hwmgr,
1018 cz_hwmgr->sclk_dpm.soft_max_clk,
1019 PPSMC_MSG_SetSclkSoftMax));
1020
1021 return 0;
1022}
1023
1024int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
1025 void *output, void *storage, int result)
1026{
1027 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1028
1029 cz_hwmgr->acp_boot_level = 0xff;
1030 return 0;
1031}
1032
1033static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
1034 unsigned long check_feature)
1035{
1036 int result;
1037 unsigned long features;
1038
1039 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetFeatureStatus, 0);
1040 if (result == 0) {
1041 features = smum_get_argument(hwmgr->smumgr);
1042 if (features & check_feature)
1043 return true;
1044 }
1045
1046 return result;
1047}
1048
1049static int cz_tf_check_for_dpm_disabled(struct pp_hwmgr *hwmgr, void *input,
1050 void *output, void *storage, int result)
1051{
1052 if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
1053 return PP_Result_TableImmediateExit;
1054 return 0;
1055}
1056
/* DIDT (di/dt) programming is not implemented yet; always returns 0. */
static int cz_tf_enable_didt(struct pp_hwmgr *hwmgr, void *input,
				void *output, void *storage, int result)
{
	/* TO DO */
	return 0;
}
1063
1064static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
1065 void *input, void *output,
1066 void *storage, int result)
1067{
1068 if (!cz_dpm_check_smu_features(hwmgr,
1069 SMU_EnabledFeatureScoreboard_SclkDpmOn))
1070 return PP_Result_TableImmediateExit;
1071 return 0;
1072}
1073
/* Tasks run when disabling dynamic state management. */
static struct phm_master_table_item cz_disable_dpm_list[] = {
	{ NULL, cz_tf_check_for_dpm_enabled},
	{NULL, NULL},
};


/* Master table wrapping the disable-DPM task list (no flags). */
static struct phm_master_table_header cz_disable_dpm_master = {
	0,
	PHM_MasterTableFlag_None,
	cz_disable_dpm_list
};
1085
/* Tasks run when enabling dynamic state management. */
static struct phm_master_table_item cz_enable_dpm_list[] = {
	{ NULL, cz_tf_check_for_dpm_disabled },
	{ NULL, cz_tf_program_voting_clients },
	{ NULL, cz_tf_start_dpm},
	{ NULL, cz_tf_program_bootup_state},
	{ NULL, cz_tf_enable_didt },
	{ NULL, cz_tf_reset_acp_boot_level },
	{NULL, NULL},
};

/* Master table wrapping the enable-DPM task list (no flags). */
static struct phm_master_table_header cz_enable_dpm_master = {
	0,
	PHM_MasterTableFlag_None,
	cz_enable_dpm_list
};
1101
/*
 * cz_apply_state_adjust_rules - adjust the requested power state before
 * it is applied.
 *
 * Copies the VCE arbiter clocks into the state, records video/battery
 * status in the backend, and decides the NB P-state action: FORCE_HIGH
 * when the required memory clock exceeds the highest NB P-state memory
 * clock or many displays are active, CANCEL_FORCE_HIGH when a previous
 * force is no longer needed, DO_NOTHING otherwise.  Always returns 0.
 */
static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state  *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	struct cz_power_state *cz_ps =
			cast_PhwCzPowerState(&prequest_ps->hardware);

	const struct cz_power_state *cz_current_ps =
			cast_const_PhwCzPowerState(&pcurrent_ps->hardware);

	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
	struct PP_Clocks clocks;
	bool force_high;
	/* display count query is stubbed out below; hard-coded to 4 — TODO */
	unsigned long  num_of_active_displays = 4;

	cz_ps->evclk = hwmgr->vce_arbiter.evclk;
	cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;

	cz_ps->need_dfs_bypass = true;

	/* any non-zero UVD/VCE clock request counts as active video */
	cz_hwmgr->video_start = (hwmgr->uvd_arbiter.vclk != 0 || hwmgr->uvd_arbiter.dclk != 0 ||
				hwmgr->vce_arbiter.evclk != 0 || hwmgr->vce_arbiter.ecclk != 0);

	cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	/* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */
	/* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
	else
		clocks.memoryClock = 0;

	if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
		clocks.memoryClock = hwmgr->gfx_arbiter.mclk;

	force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
			|| (num_of_active_displays >= 3);

	cz_ps->action = cz_current_ps->action;

	if ((force_high == false) && (cz_ps->action == FORCE_HIGH))
		cz_ps->action = CANCEL_FORCE_HIGH;
	else if ((force_high == true) && (cz_ps->action != FORCE_HIGH))
		cz_ps->action = FORCE_HIGH;
	else
		cz_ps->action = DO_NOTHING;

	return 0;
}
1151
1152static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1153{
1154 int result = 0;
1155
1156 result = cz_initialize_dpm_defaults(hwmgr);
1157 if (result != 0) {
1158 printk(KERN_ERR "[ powerplay ] cz_initialize_dpm_defaults failed\n");
1159 return result;
1160 }
1161
1162 result = cz_get_system_info_data(hwmgr);
1163 if (result != 0) {
1164 printk(KERN_ERR "[ powerplay ] cz_get_system_info_data failed\n");
1165 return result;
1166 }
1167
1168 cz_construct_boot_state(hwmgr);
1169
1170 result = phm_construct_table(hwmgr, &cz_setup_asic_master,
1171 &(hwmgr->setup_asic));
1172 if (result != 0) {
1173 printk(KERN_ERR "[ powerplay ] Fail to construct setup ASIC\n");
1174 return result;
1175 }
1176
1177 result = phm_construct_table(hwmgr, &cz_power_down_asic_master,
1178 &(hwmgr->power_down_asic));
1179 if (result != 0) {
1180 printk(KERN_ERR "[ powerplay ] Fail to construct power down ASIC\n");
1181 return result;
1182 }
1183
1184 result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
1185 &(hwmgr->disable_dynamic_state_management));
1186 if (result != 0) {
1187 printk(KERN_ERR "[ powerplay ] Fail to disable_dynamic_state\n");
1188 return result;
1189 }
1190 result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
1191 &(hwmgr->enable_dynamic_state_management));
1192 if (result != 0) {
1193 printk(KERN_ERR "[ powerplay ] Fail to enable_dynamic_state\n");
1194 return result;
1195 }
1196 result = phm_construct_table(hwmgr, &cz_set_power_state_master,
1197 &(hwmgr->set_power_state));
1198 if (result != 0) {
1199 printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n");
1200 return result;
1201 }
1202
1203 result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
1204 if (result != 0) {
1205 printk(KERN_ERR "[ powerplay ] Fail to construct enable_clock_power_gatings\n");
1206 return result;
1207 }
1208 return result;
1209}
1210
1211static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1212{
1213 if (hwmgr != NULL || hwmgr->backend != NULL) {
1214 kfree(hwmgr->backend);
1215 kfree(hwmgr);
1216 }
1217 return 0;
1218}
1219
1220int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1221{
1222 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1223
1224 if (cz_hwmgr->sclk_dpm.soft_min_clk !=
1225 cz_hwmgr->sclk_dpm.soft_max_clk)
1226 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1227 PPSMC_MSG_SetSclkSoftMin,
1228 cz_get_sclk_level(hwmgr,
1229 cz_hwmgr->sclk_dpm.soft_max_clk,
1230 PPSMC_MSG_SetSclkSoftMin));
1231 return 0;
1232}
1233
1234int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1235{
1236 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1237 struct phm_clock_voltage_dependency_table *table =
1238 hwmgr->dyn_state.vddc_dependency_on_sclk;
1239 unsigned long clock = 0, level;
1240
1241 if (NULL == table || table->count <= 0)
1242 return -EINVAL;
1243
1244 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
1245 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
1246
1247 level = cz_get_max_sclk_level(hwmgr) - 1;
1248
1249 if (level < table->count)
1250 clock = table->entries[level].clk;
1251 else
1252 clock = table->entries[table->count - 1].clk;
1253
1254 cz_hwmgr->sclk_dpm.soft_max_clk = clock;
1255 cz_hwmgr->sclk_dpm.hard_max_clk = clock;
1256
1257 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1258 PPSMC_MSG_SetSclkSoftMin,
1259 cz_get_sclk_level(hwmgr,
1260 cz_hwmgr->sclk_dpm.soft_min_clk,
1261 PPSMC_MSG_SetSclkSoftMin));
1262
1263 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1264 PPSMC_MSG_SetSclkSoftMax,
1265 cz_get_sclk_level(hwmgr,
1266 cz_hwmgr->sclk_dpm.soft_max_clk,
1267 PPSMC_MSG_SetSclkSoftMax));
1268
1269 return 0;
1270}
1271
1272int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1273{
1274 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1275
1276 if (cz_hwmgr->sclk_dpm.soft_min_clk !=
1277 cz_hwmgr->sclk_dpm.soft_max_clk) {
1278 cz_hwmgr->sclk_dpm.soft_max_clk =
1279 cz_hwmgr->sclk_dpm.soft_min_clk;
1280
1281 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1282 PPSMC_MSG_SetSclkSoftMax,
1283 cz_get_sclk_level(hwmgr,
1284 cz_hwmgr->sclk_dpm.soft_max_clk,
1285 PPSMC_MSG_SetSclkSoftMax));
1286 }
1287
1288 return 0;
1289}
1290
1291static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1292 enum amd_dpm_forced_level level)
1293{
1294 int ret = 0;
1295
1296 switch (level) {
1297 case AMD_DPM_FORCED_LEVEL_HIGH:
1298 ret = cz_phm_force_dpm_highest(hwmgr);
1299 if (ret)
1300 return ret;
1301 break;
1302 case AMD_DPM_FORCED_LEVEL_LOW:
1303 ret = cz_phm_force_dpm_lowest(hwmgr);
1304 if (ret)
1305 return ret;
1306 break;
1307 case AMD_DPM_FORCED_LEVEL_AUTO:
1308 ret = cz_phm_unforce_dpm_levels(hwmgr);
1309 if (ret)
1310 return ret;
1311 break;
1312 default:
1313 break;
1314 }
1315
1316 hwmgr->dpm_level = level;
1317
1318 return ret;
1319}
1320
1321int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1322{
1323 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1324 PHM_PlatformCaps_UVDPowerGating))
1325 return smum_send_msg_to_smc(hwmgr->smumgr,
1326 PPSMC_MSG_UVDPowerOFF);
1327 return 0;
1328}
1329
1330int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1331{
1332 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1333 PHM_PlatformCaps_UVDPowerGating)) {
1334 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1335 PHM_PlatformCaps_UVDDynamicPowerGating)) {
1336 return smum_send_msg_to_smc_with_parameter(
1337 hwmgr->smumgr,
1338 PPSMC_MSG_UVDPowerON, 1);
1339 } else {
1340 return smum_send_msg_to_smc_with_parameter(
1341 hwmgr->smumgr,
1342 PPSMC_MSG_UVDPowerON, 0);
1343 }
1344 }
1345
1346 return 0;
1347}
1348
1349int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1350{
1351 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1352 struct phm_uvd_clock_voltage_dependency_table *ptable =
1353 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1354
1355 if (!bgate) {
1356 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1357 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1358 PHM_PlatformCaps_StablePState)) {
1359 cz_hwmgr->uvd_dpm.hard_min_clk =
1360 ptable->entries[ptable->count - 1].vclk;
1361
1362 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1363 PPSMC_MSG_SetUvdHardMin,
1364 cz_get_uvd_level(hwmgr,
1365 cz_hwmgr->uvd_dpm.hard_min_clk,
1366 PPSMC_MSG_SetUvdHardMin));
1367
1368 cz_enable_disable_uvd_dpm(hwmgr, true);
1369 } else
1370 cz_enable_disable_uvd_dpm(hwmgr, true);
1371 } else
1372 cz_enable_disable_uvd_dpm(hwmgr, false);
1373
1374 return 0;
1375}
1376
1377int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1378{
1379 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1380 struct phm_vce_clock_voltage_dependency_table *ptable =
1381 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1382
1383 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1384 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1385 PHM_PlatformCaps_StablePState)) {
1386 cz_hwmgr->vce_dpm.hard_min_clk =
1387 ptable->entries[ptable->count - 1].ecclk;
1388
1389 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1390 PPSMC_MSG_SetEclkHardMin,
1391 cz_get_eclk_level(hwmgr,
1392 cz_hwmgr->vce_dpm.hard_min_clk,
1393 PPSMC_MSG_SetEclkHardMin));
1394 } else {
1395 /*EPR# 419220 -HW limitation to to */
1396 cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
1397 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1398 PPSMC_MSG_SetEclkHardMin,
1399 cz_get_eclk_level(hwmgr,
1400 cz_hwmgr->vce_dpm.hard_min_clk,
1401 PPSMC_MSG_SetEclkHardMin));
1402
1403 }
1404 return 0;
1405}
1406
1407int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1408{
1409 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1410 PHM_PlatformCaps_VCEPowerGating))
1411 return smum_send_msg_to_smc(hwmgr->smumgr,
1412 PPSMC_MSG_VCEPowerOFF);
1413 return 0;
1414}
1415
1416int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1417{
1418 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1419 PHM_PlatformCaps_VCEPowerGating))
1420 return smum_send_msg_to_smc(hwmgr->smumgr,
1421 PPSMC_MSG_VCEPowerON);
1422 return 0;
1423}
1424
/*
 * Report the memory clock. Carrizo uses UMA, so the boot-up UMA clock
 * from sys_info is returned regardless of @low.
 * NOTE(review): the uint32_t clock is returned through an int — fine for
 * realistic clock values, but worth confirming against the hwmgr_func
 * interface contract.
 */
static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

	return cz_hwmgr->sys_info.bootup_uma_clock;
}
1431
1432static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1433{
1434 struct pp_power_state *ps;
1435 struct cz_power_state *cz_ps;
1436
1437 if (hwmgr == NULL)
1438 return -EINVAL;
1439
1440 ps = hwmgr->request_ps;
1441
1442 if (ps == NULL)
1443 return -EINVAL;
1444
1445 cz_ps = cast_PhwCzPowerState(&ps->hardware);
1446
1447 if (low)
1448 return cz_ps->levels[0].engineClock;
1449 else
1450 return cz_ps->levels[cz_ps->level-1].engineClock;
1451}
1452
1453static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1454 struct pp_hw_power_state *hw_ps)
1455{
1456 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1457 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
1458
1459 cz_ps->level = 1;
1460 cz_ps->nbps_flags = 0;
1461 cz_ps->bapm_flags = 0;
1462 cz_ps->levels[0] = cz_hwmgr->boot_power_level;
1463
1464 return 0;
1465}
1466
1467static int cz_dpm_get_pp_table_entry_callback(
1468 struct pp_hwmgr *hwmgr,
1469 struct pp_hw_power_state *hw_ps,
1470 unsigned int index,
1471 const void *clock_info)
1472{
1473 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
1474
1475 const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info;
1476
1477 struct phm_clock_voltage_dependency_table *table =
1478 hwmgr->dyn_state.vddc_dependency_on_sclk;
1479 uint8_t clock_info_index = cz_clock_info->index;
1480
1481 if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1482 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1483
1484 cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1485 cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1486
1487 cz_ps->level = index + 1;
1488
1489 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1490 cz_ps->levels[index].dsDividerIndex = 5;
1491 cz_ps->levels[index].ssDividerIndex = 5;
1492 }
1493
1494 return 0;
1495}
1496
/*
 * Number of power-play table entries, or 0 when the table query fails.
 */
static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	unsigned long count = 0;

	if (pp_tables_get_num_of_entries(hwmgr, &count))
		return 0;

	return count;
}
1506
1507static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1508 unsigned long entry, struct pp_power_state *ps)
1509{
1510 int result;
1511 struct cz_power_state *cz_ps;
1512
1513 ps->hardware.magic = PhwCz_Magic;
1514
1515 cz_ps = cast_PhwCzPowerState(&(ps->hardware));
1516
1517 result = pp_tables_get_entry(hwmgr, entry, ps,
1518 cz_dpm_get_pp_table_entry_callback);
1519
1520 cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1521 cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1522
1523 return result;
1524}
1525
/* Size in bytes of the CZ-specific power state, for generic allocation. */
int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct cz_power_state);
}
1530
/*
 * Debug/seq_file dump of the current performance level: SCLK index and
 * clock, NB/GFX voltages, UVD and VCE clocks (when not power gated),
 * and average GPU load queried from the SMU.
 */
static void
cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	/* Current DPM indices are read back from SMC indirect registers. */
	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);

	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
	uint16_t vddnb, vddgfx;
	int result;

	if (sclk_index >= NUM_SCLK_LEVELS) {
		seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
	} else {
		sclk = table->entries[sclk_index].clk;
		/* clocks are stored in 10 kHz units, hence /100 for MHz */
		seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
	}

	/* NB and GFX voltages: convert the raw 8-bit VID to a voltage. */
	tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
		CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
	vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
	tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
		CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
	vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
	seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);

	seq_printf(m, "\n uvd    %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
	if (!cz_hwmgr->uvd_power_gated) {
		if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
			seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
		} else {
			vclk = uvd_table->entries[uvd_index].vclk;
			dclk = uvd_table->entries[uvd_index].dclk;
			seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
		}
	}

	seq_printf(m, "\n vce    %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
	if (!cz_hwmgr->vce_power_gated) {
		if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
			seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
		} else {
			ecclk = vce_table->entries[vce_index].ecclk;
			seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
		}
	}

	/* GPU load: ask the SMU; fall back to 50% if the query fails. */
	result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
	if (0 == result) {
		activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
		activity_percent = activity_percent > 100 ? 100 : activity_percent;
	} else {
		activity_percent = 50;
	}

	seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
}
1602
1603static void cz_hw_print_display_cfg(
1604 const struct cc6_settings *cc6_settings)
1605{
1606 PP_DBG_LOG("New Display Configuration:\n");
1607
1608 PP_DBG_LOG(" cpu_cc6_disable: %d\n",
1609 cc6_settings->cpu_cc6_disable);
1610 PP_DBG_LOG(" cpu_pstate_disable: %d\n",
1611 cc6_settings->cpu_pstate_disable);
1612 PP_DBG_LOG(" nb_pstate_switch_disable: %d\n",
1613 cc6_settings->nb_pstate_switch_disable);
1614 PP_DBG_LOG(" cpu_pstate_separation_time: %d\n\n",
1615 cc6_settings->cpu_pstate_separation_time);
1616}
1617
1618 static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1619{
1620 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
1621 uint32_t data = 0;
1622
1623 if (hw_data->cc6_settings.cc6_setting_changed == true) {
1624
1625 hw_data->cc6_settings.cc6_setting_changed = false;
1626
1627 cz_hw_print_display_cfg(&hw_data->cc6_settings);
1628
1629 data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1630 & PWRMGT_SEPARATION_TIME_MASK)
1631 << PWRMGT_SEPARATION_TIME_SHIFT;
1632
1633 data|= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
1634 << PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
1635
1636 data|= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
1637 << PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
1638
1639 PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
1640 data);
1641
1642 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1643 PPSMC_MSG_SetDisplaySizePowerParams,
1644 data);
1645 }
1646
1647 return 0;
1648}
1649
1650
1651 static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1652 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1653 {
1654 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
1655
1656 if (separation_time !=
1657 hw_data->cc6_settings.cpu_pstate_separation_time
1658 || cc6_disable !=
1659 hw_data->cc6_settings.cpu_cc6_disable
1660 || pstate_disable !=
1661 hw_data->cc6_settings.cpu_pstate_disable
1662 || pstate_switch_disable !=
1663 hw_data->cc6_settings.nb_pstate_switch_disable) {
1664
1665 hw_data->cc6_settings.cc6_setting_changed = true;
1666
1667 hw_data->cc6_settings.cpu_pstate_separation_time =
1668 separation_time;
1669 hw_data->cc6_settings.cpu_cc6_disable =
1670 cc6_disable;
1671 hw_data->cc6_settings.cpu_pstate_disable =
1672 pstate_disable;
1673 hw_data->cc6_settings.nb_pstate_switch_disable =
1674 pstate_switch_disable;
1675
1676 }
1677
1678 return 0;
1679}
1680
1681 static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
1682 struct amd_pp_dal_clock_info*info)
1683{
1684 uint32_t i;
1685 const struct phm_clock_voltage_dependency_table * table =
1686 hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
1687 const struct phm_clock_and_voltage_limits* limits =
1688 &hwmgr->dyn_state.max_clock_voltage_on_ac;
1689
1690 info->engine_max_clock = limits->sclk;
1691 info->memory_max_clock = limits->mclk;
1692
1693 for (i = table->count - 1; i > 0; i--) {
1694
1695 if (limits->vddc >= table->entries[i].v) {
1696 info->level = table->entries[i].clk;
1697 return 0;
1698 }
1699 }
1700 return -EINVAL;
1701}
1702
/*
 * Carrizo implementation of the hardware-manager vtable. Fields left
 * NULL (asic_setup) fall back to whatever the generic layer does.
 * All handlers are defined above in this file.
 */
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
	.backend_init = cz_hwmgr_backend_init,
	.backend_fini = cz_hwmgr_backend_fini,
	.asic_setup = NULL,
	.apply_state_adjust_rules = cz_apply_state_adjust_rules,
	.force_dpm_level = cz_dpm_force_dpm_level,
	.get_power_state_size = cz_get_power_state_size,
	.powerdown_uvd = cz_dpm_powerdown_uvd,
	.powergate_uvd = cz_dpm_powergate_uvd,
	.powergate_vce = cz_dpm_powergate_vce,
	.get_mclk = cz_dpm_get_mclk,
	.get_sclk = cz_dpm_get_sclk,
	.patch_boot_state = cz_dpm_patch_boot_state,
	.get_pp_table_entry = cz_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
	.print_current_perforce_level = cz_print_current_perforce_level,
	.set_cpu_power_state = cz_set_cpu_power_state,
	.store_cc6_data = cz_store_cc6_data,
	.get_dal_power_level= cz_get_dal_power_level,
};
1723
1724int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
1725{
1726 struct cz_hwmgr *cz_hwmgr;
1727 int ret = 0;
1728
1729 cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
1730 if (cz_hwmgr == NULL)
1731 return -ENOMEM;
1732
1733 hwmgr->backend = cz_hwmgr;
1734 hwmgr->hwmgr_func = &cz_hwmgr_funcs;
1735 hwmgr->pptable_func = &pptable_funcs;
1736 return ret;
1737}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
new file mode 100644
index 000000000000..c477f1cf3f23
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
@@ -0,0 +1,326 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _CZ_HWMGR_H_
25#define _CZ_HWMGR_H_
26
27#include "cgs_common.h"
28#include "ppatomctrl.h"
29
30#define CZ_NUM_NBPSTATES 4
31#define CZ_NUM_NBPMEMORYCLOCK 2
32#define MAX_DISPLAY_CLOCK_LEVEL 8
33#define CZ_AT_DFLT 30
34#define CZ_MAX_HARDWARE_POWERLEVELS 8
35#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
36#define CZ_MIN_DEEP_SLEEP_SCLK 800
37
38/* Carrizo device IDs */
39#define DEVICE_ID_CZ_9870 0x9870
40#define DEVICE_ID_CZ_9874 0x9874
41#define DEVICE_ID_CZ_9875 0x9875
42#define DEVICE_ID_CZ_9876 0x9876
43#define DEVICE_ID_CZ_9877 0x9877
44
45#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \
46 cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)
47
/* Soft/hard clock window (in 10 kHz units, per the hwmgr convention
 * used elsewhere in this driver — TODO confirm) for one DPM domain
 * (SCLK, UVD, VCE or ACP). */
struct cz_dpm_entry {
	uint32_t soft_min_clk;
	uint32_t hard_min_clk;
	uint32_t soft_max_clk;
	uint32_t hard_max_clk;
};
54
/* Static platform information gathered at init time: boot-up clocks,
 * NB P-state clock/voltage tables, display clock levels and thermal
 * limits. */
struct cz_sys_info {
	uint32_t bootup_uma_clock;
	uint32_t bootup_engine_clock;
	uint32_t dentist_vco_freq;
	uint32_t nb_dpm_enable;
	uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK];
	uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
	uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES];
	uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL];
	uint16_t bootup_nb_voltage_index;
	uint8_t htc_tmp_lmt;	/* thermal limit */
	uint8_t htc_hyst_lmt;	/* thermal hysteresis */
	uint32_t system_config;
	uint32_t uma_channel_number;
};
70
71#define MAX_DISPLAYPHY_IDS 0x8
72#define DISPLAYPHY_LANEMASK 0xF
73#define UNKNOWN_TRANSMITTER_PHY_ID (-1)
74
75#define DISPLAYPHY_PHYID_SHIFT 24
76#define DISPLAYPHY_LANESELECT_SHIFT 16
77
78#define DISPLAYPHY_RX_SELECT 0x1
79#define DISPLAYPHY_TX_SELECT 0x2
80#define DISPLAYPHY_CORE_SELECT 0x4
81
82#define DDI_POWERGATING_ARG(phyID, lanemask, rx, tx, core) \
83 (((uint32_t)(phyID))<<DISPLAYPHY_PHYID_SHIFT | \
84 ((uint32_t)(lanemask))<<DISPLAYPHY_LANESELECT_SHIFT | \
85 ((rx) ? DISPLAYPHY_RX_SELECT : 0) | \
86 ((tx) ? DISPLAYPHY_TX_SELECT : 0) | \
87 ((core) ? DISPLAYPHY_CORE_SELECT : 0))
88
/* Per-PHY display configuration as tracked for DDI power gating. */
struct cz_display_phy_info_entry {
	uint8_t phy_present;
	uint8_t active_lane_mapping;
	uint8_t display_config_type;
	uint8_t active_number_of_lanes;
};
95
96#define CZ_MAX_DISPLAYPHY_IDS 10
97
/* Collection of all display PHY entries plus an init flag for the
 * display-PHY access path. */
struct cz_display_phy_info {
	bool display_phy_access_initialized;
	struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS];
};
102
/* One DPM performance level: engine clock, voltage index, deep-sleep
 * divider indices and assorted per-level policy bytes. */
struct cz_power_level {
	uint32_t engineClock;
	uint8_t vddcIndex;
	uint8_t dsDividerIndex;
	uint8_t ssDividerIndex;
	uint8_t allowGnbSlow;
	uint8_t forceNBPstate;
	uint8_t display_wm;
	uint8_t vce_wm;
	uint8_t numSIMDToPowerDown;
	uint8_t hysteresis_up;
	uint8_t rsv[3];		/* pad to keep the struct aligned */
};
116
/* UVD video (vclk) and decode (dclk) clocks with their DFS dividers. */
struct cz_uvd_clocks {
	uint32_t vclk;
	uint32_t dclk;
	uint32_t vclk_low_divider;
	uint32_t vclk_high_divider;
	uint32_t dclk_low_divider;
	uint32_t dclk_high_divider;
};
125
/* Last P-state forcing action taken, so it can be undone or repeated. */
enum cz_pstate_previous_action {
	DO_NOTHING = 1,
	FORCE_HIGH,
	CANCEL_FORCE_HIGH
};
131
/* Bit flags recording which clients (display, driver, VCE, UVD, ACP)
 * requested NB P-state switching to be disabled; u32All gives the
 * whole word for bulk tests/clears. */
struct pp_disable_nb_ps_flags {
	union {
		struct {
			uint32_t entry : 1;
			uint32_t display : 1;
			uint32_t driver: 1;
			uint32_t vce : 1;
			uint32_t uvd : 1;
			uint32_t acp : 1;
			uint32_t reserved: 26;
		} bits;
		uint32_t u32All;
	};
};
146
/* Carrizo hardware power state: validated by `magic` (PhwCz_Magic, see
 * cast_PhwCzPowerState), carrying up to CZ_MAX_HARDWARE_POWERLEVELS
 * performance levels plus UVD/VCE/ACP clock requests and NB P-state
 * bookkeeping. */
struct cz_power_state {
	unsigned int magic;
	uint32_t level;		/* number of valid entries in levels[] */
	struct cz_uvd_clocks uvd_clocks;
	uint32_t evclk;
	uint32_t ecclk;
	uint32_t samclk;
	uint32_t acpclk;
	bool need_dfs_bypass;
	uint32_t nbps_flags;
	uint32_t bapm_flags;
	uint8_t dpm_0_pg_nb_ps_low;
	uint8_t dpm_0_pg_nb_ps_high;
	uint8_t dpm_x_nb_ps_low;
	uint8_t dpm_x_nb_ps_high;
	enum cz_pstate_previous_action action;
	struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS];
	struct pp_disable_nb_ps_flags disable_nb_ps_flag;
};
166
167#define DPMFlags_SCLK_Enabled 0x00000001
168#define DPMFlags_UVD_Enabled 0x00000002
169#define DPMFlags_VCE_Enabled 0x00000004
170#define DPMFlags_ACP_Enabled 0x00000008
171#define DPMFlags_ForceHighestValid 0x40000000
172#define DPMFlags_Debug 0x80000000
173
174#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 /* bit 0 */
175#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
176#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 /* bit 23 */
177#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 /* bit 24 */
178
/* Cached CPU low-power (CC6) configuration; cc6_setting_changed marks
 * the cache dirty so cz_set_cpu_power_state() pushes it to the SMU. */
struct cc6_settings {
	bool cc6_setting_changed;
	bool nb_pstate_switch_disable;/* controls NB PState switch */
	bool cpu_cc6_disable; /* controls CPU CState switch ( on or off) */
	bool cpu_pstate_disable;
	uint32_t cpu_pstate_separation_time;
};
186
/*
 * Private backend state for the Carrizo hwmgr, allocated in
 * cz_hwmgr_init() and reachable via hwmgr->backend. Groups static
 * platform info, current/requested power states, DPM windows per
 * domain, PowerTune knobs and power-gating status flags.
 */
struct cz_hwmgr {
	uint32_t activity_target[CZ_MAX_HARDWARE_POWERLEVELS];
	uint32_t dpm_interval;

	uint32_t voltage_drop_threshold;

	uint32_t voting_rights_clients;

	uint32_t disable_driver_thermal_policy;

	uint32_t static_screen_threshold;

	uint32_t gfx_power_gating_threshold;

	uint32_t activity_hysteresis;
	uint32_t bootup_sclk_divider;
	uint32_t gfx_ramp_step;
	uint32_t gfx_ramp_delay; /* in micro-seconds */

	uint32_t thermal_auto_throttling_treshold;

	/* Static platform info captured at init. */
	struct cz_sys_info sys_info;

	struct cz_power_level boot_power_level;
	struct cz_power_state *cz_current_ps;
	struct cz_power_state *cz_requested_ps;

	uint32_t mgcg_cgtt_local0;
	uint32_t mgcg_cgtt_local1;

	uint32_t tdr_clock; /* in 10khz unit */

	uint32_t ddi_power_gating_disabled;
	uint32_t disable_gfx_power_gating_in_uvd;
	uint32_t disable_nb_ps3_in_battery;

	uint32_t lock_nb_ps_in_uvd_play_back;

	struct cz_display_phy_info display_phy_info;
	uint32_t vce_slow_sclk_threshold; /* default 200mhz */
	uint32_t dce_slow_sclk_threshold; /* default 300mhz */
	uint32_t min_sclk_did;  /* minimum sclk divider */

	bool disp_clk_bypass;
	bool disp_clk_bypass_pending;
	uint32_t bapm_enabled;
	uint32_t clock_slow_down_freq;
	uint32_t skip_clock_slow_down;
	uint32_t enable_nb_ps_policy;
	uint32_t voltage_drop_in_dce_power_gating;
	uint32_t uvd_dpm_interval;
	uint32_t override_dynamic_mgpg;
	uint32_t lclk_deep_enabled;

	uint32_t uvd_performance;

	bool video_start;
	bool battery_state;
	uint32_t lowest_valid;
	uint32_t highest_valid;
	uint32_t high_voltage_threshold;
	uint32_t is_nb_dpm_enabled;
	/* CC6 cache consumed by cz_set_cpu_power_state(). */
	struct cc6_settings cc6_settings;
	uint32_t is_voltage_island_enabled;

	bool pgacpinit;

	uint8_t disp_config;

	/* PowerTune */
	uint32_t power_containment_features;
	bool cac_enabled;
	bool disable_uvd_power_tune_feature;
	bool enable_ba_pm_feature;
	bool enable_tdc_limit_feature;

	uint32_t sram_end;
	uint32_t dpm_table_start;
	uint32_t soft_regs_start;

	uint8_t uvd_level_count;
	uint8_t vce_level_count;

	uint8_t acp_level_count;
	uint8_t samu_level_count;
	uint32_t fps_high_threshold;
	uint32_t fps_low_threshold;

	/* Per-domain DPM clock windows (see DPMFlags_* above). */
	uint32_t dpm_flags;
	struct cz_dpm_entry sclk_dpm;
	struct cz_dpm_entry uvd_dpm;
	struct cz_dpm_entry vce_dpm;
	struct cz_dpm_entry acp_dpm;

	uint8_t uvd_boot_level;
	uint8_t vce_boot_level;
	uint8_t acp_boot_level;
	uint8_t samu_boot_level;
	uint8_t uvd_interval;
	uint8_t vce_interval;
	uint8_t acp_interval;
	uint8_t samu_interval;

	uint8_t graphics_interval;
	uint8_t graphics_therm_throttle_enable;
	uint8_t graphics_voltage_change_enable;

	uint8_t graphics_clk_slow_enable;
	uint8_t graphics_clk_slow_divider;

	uint32_t display_cac;
	uint32_t low_sclk_interrupt_threshold;

	uint32_t dram_log_addr_h;
	uint32_t dram_log_addr_l;
	uint32_t dram_log_phy_addr_h;
	uint32_t dram_log_phy_addr_l;
	uint32_t dram_log_buff_size;

	/* Current power-gating status of the multimedia blocks. */
	bool uvd_power_gated;
	bool vce_power_gated;
	bool samu_power_gated;
	bool acp_power_gated;
	bool acp_power_up_no_dsp;
	uint32_t active_process_mask;

	uint32_t max_sclk_level;
	uint32_t num_of_clk_entries;
};
316
struct pp_hwmgr;

/* Entry points exported to the generic powerplay layer; the UVD/VCE
 * helpers are also used by cz_clockpowergating. */
int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
#endif /* _CZ_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
new file mode 100644
index 000000000000..e68edf06ed73
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "fiji_clockpowergating.h"
26#include "fiji_ppsmc.h"
27#include "fiji_hwmgr.h"
28
29int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
30{
31 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
32
33 data->uvd_power_gated = false;
34 data->vce_power_gated = false;
35 data->samu_power_gated = false;
36 data->acp_power_gated = false;
37
38 return 0;
39}
40
41int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
42{
43 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
44
45 if (data->uvd_power_gated == bgate)
46 return 0;
47
48 data->uvd_power_gated = bgate;
49
50 if (bgate)
51 fiji_update_uvd_dpm(hwmgr, true);
52 else
53 fiji_update_uvd_dpm(hwmgr, false);
54
55 return 0;
56}
57
58int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
59{
60 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
61 struct phm_set_power_state_input states;
62 const struct pp_power_state *pcurrent;
63 struct pp_power_state *requested;
64
65 if (data->vce_power_gated == bgate)
66 return 0;
67
68 data->vce_power_gated = bgate;
69
70 pcurrent = hwmgr->current_ps;
71 requested = hwmgr->request_ps;
72
73 states.pcurrent_state = &(pcurrent->hardware);
74 states.pnew_state = &(requested->hardware);
75
76 fiji_update_vce_dpm(hwmgr, &states);
77 fiji_enable_disable_vce_dpm(hwmgr, !bgate);
78
79 return 0;
80}
81
82int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
83{
84 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
85
86 if (data->samu_power_gated == bgate)
87 return 0;
88
89 data->samu_power_gated = bgate;
90
91 if (bgate)
92 fiji_update_samu_dpm(hwmgr, true);
93 else
94 fiji_update_samu_dpm(hwmgr, false);
95
96 return 0;
97}
98
99int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
100{
101 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
102
103 if (data->acp_power_gated == bgate)
104 return 0;
105
106 data->acp_power_gated = bgate;
107
108 if (bgate)
109 fiji_update_acp_dpm(hwmgr, true);
110 else
111 fiji_update_acp_dpm(hwmgr, false);
112
113 return 0;
114}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
new file mode 100644
index 000000000000..33af5f511ab8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef _FIJI_CLOCK_POWER_GATING_H_
#define _FIJI_CLOCK_POWER_GATING_H_

#include "fiji_hwmgr.h"
#include "pp_asicblocks.h"

/* Fiji multimedia-block power-gating entry points (fiji_clockpowergating.c). */
extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
#endif /* _FIJI_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
new file mode 100644
index 000000000000..32d43e8fecb2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef FIJI_DYN_DEFAULTS_H
#define FIJI_DYN_DEFAULTS_H

/** \file
* Fiji (Volcanic Islands family) dynamic default parameters.
*/

/* NOTE(review): the constants use an "FIJIAdpm_" prefix while the enum
 * tag is "FIJIdpm_" — looks like a stray 'A'; renaming would touch all
 * users, so it is only flagged here. */
enum FIJIdpm_TrendDetection
{
    FIJIAdpm_TrendDetection_AUTO,
    FIJIAdpm_TrendDetection_UP,
    FIJIAdpm_TrendDetection_DOWN
};
typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;

/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */

/* Voting-rights-clients defaults: bit vectors with the same layout as
 * the corresponding hardware register. Busy-signal names below are
 * carried over from the original annotations ("????" marks the
 * original author's own uncertainty). */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ????
                                                     * HDP_busy
                                                     * IH_busy
                                                     * UVD_busy
                                                     * VCE_busy
                                                     * ACP_busy
                                                     * SAMU_busy
                                                     * SDMA enabled */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ????
                                                   * SH_Gfx_busy
                                                   * RB_Gfx_busy
                                                   * VCE_busy */

#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility.
                                                   * FE_Gfx_busy
                                                   * RB_Gfx_busy
                                                   * ACP_busy */

#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility.
                                                   * FE_Gfx_busy
                                                   * SH_Gfx_busy
                                                   * UVD_busy */

#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy
                                                   * VCE_busy
                                                   * ACP_busy
                                                   * SAMU_busy */

#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */


/* thermal protection counter (units). */
#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */

/* static screen threshold unit */
#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0

/* static screen threshold */
#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8

/* gfx idle clock stop threshold */
#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */

/* Fixed reference divider to use when building baby stepping tables. */
#define PPFIJI_REFERENCEDIVIDER_DFLT 4

/* ULV voltage change delay time
 * Used to be delay_vreg in N.I. split for S.I.
 * Using N.I. delay_vreg value as default
 * ReferenceClock = 2700
 * VoltageResponseTime = 1000
 * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
 */
#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687

#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035
#define PPFIJI_CGULVCONTROL_DFLT 0x00007450
#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30% */
#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */

#endif /* FIJI_DYN_DEFAULTS_H */
105
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
new file mode 100644
index 000000000000..28031a7eddba
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -0,0 +1,5127 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27
28#include "hwmgr.h"
29#include "fiji_smumgr.h"
30#include "atombios.h"
31#include "hardwaremanager.h"
32#include "ppatomctrl.h"
33#include "atombios.h"
34#include "cgs_common.h"
35#include "fiji_dyn_defaults.h"
36#include "fiji_powertune.h"
37#include "smu73.h"
38#include "smu/smu_7_1_3_d.h"
39#include "smu/smu_7_1_3_sh_mask.h"
40#include "gmc/gmc_8_1_d.h"
41#include "gmc/gmc_8_1_sh_mask.h"
42#include "bif/bif_5_0_d.h"
43#include "bif/bif_5_0_sh_mask.h"
44#include "dce/dce_10_0_d.h"
45#include "dce/dce_10_0_sh_mask.h"
46#include "pppcielanes.h"
47#include "fiji_hwmgr.h"
48#include "tonga_processpptables.h"
49#include "tonga_pptable.h"
50#include "pp_debug.h"
51#include "pp_acpi.h"
52#include "amd_pcie_helpers.h"
53#include "cgs_linux.h"
54#include "ppinterrupt.h"
55
56#include "fiji_clockpowergating.h"
57#include "fiji_thermal.h"
58
#define VOLTAGE_SCALE 4
#define SMC_RAM_END 0x40000
#define VDDC_VDDCI_DELTA 300

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

/* From smc_reg.h */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */

/* VOLTAGE_SCALE and VDDC_VDDCI_DELTA were each (identically) defined a
 * second time here; the duplicates have been removed. */
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

#define ixSWRST_COMMAND_1 0x1400103
#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
	DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
	DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
};
93
94
/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
 * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
 * NOTE(review): these tables have external linkage; if nothing outside
 * this file references them they should be static (and const) —
 * confirm before changing.
 */
uint16_t fiji_clock_stretcher_lookup_table[2][4] = { {600, 1050, 3, 0},
                                                {600, 1050, 6, 1} };

/* [FF, SS] type, [] 4 voltage ranges, and
 * [Floor Freq, Boundary Freq, VID min , VID max]
 */
uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };

/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
 * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
 */
uint8_t fiji_clock_stretch_amount_conversion[2][6] = { {0, 1, 3, 2, 4, 5},
                                                {0, 2, 4, 5, 6, 5} };

/* Magic tag stored in pp_hw_power_state::magic so the cast helpers
 * below can verify a state really belongs to this backend. */
const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
115
116struct fiji_power_state *cast_phw_fiji_power_state(
117 struct pp_hw_power_state *hw_ps)
118{
119 PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
120 "Invalid Powerstate Type!",
121 return NULL;);
122
123 return (struct fiji_power_state *)hw_ps;
124}
125
126const struct fiji_power_state *cast_const_phw_fiji_power_state(
127 const struct pp_hw_power_state *hw_ps)
128{
129 PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
130 "Invalid Powerstate Type!",
131 return NULL;);
132
133 return (const struct fiji_power_state *)hw_ps;
134}
135
136static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
137{
138 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
139 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
140 ? true : false;
141}
142
/* Seed the backend with DPM defaults: ULV parameter, voting-rights
 * client masks, static-screen thresholds, power-tune defaults, PCIe
 * extrema trackers, and the related platform caps. */
static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_ulv_parm *ulv = &data->ulv;

	ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
	data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;

	data->static_screen_threshold_unit =
			PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->static_screen_threshold =
			PPFIJI_STATICSCREENTHRESHOLD_DFLT;

	/* Unset ABM cap as it moved to DAL.
	 * Add PHM_PlatformCaps_NonABMSupportInPPLib
	 * for re-direct ABM related request to DAL
	 */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ABM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_NonABMSupportInPPLib);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicACTiming);

	fiji_initialize_power_tune_defaults(hwmgr);

	data->mclk_stutter_mode_threshold = 60000;
	/* NOTE(review): max is initialized below min for both pcie_gen
	 * ranges; this appears to be a running-extrema initialization
	 * (max starts lowest, min starts highest) — confirm against the
	 * code that later updates these fields. Same pattern for the
	 * lane ranges (max 0, min 16). */
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);
}
190
191static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
192 phm_ppt_v1_voltage_lookup_table *lookup_table,
193 uint16_t virtual_voltage_id, int32_t *sclk)
194{
195 uint8_t entryId;
196 uint8_t voltageId;
197 struct phm_ppt_v1_information *table_info =
198 (struct phm_ppt_v1_information *)(hwmgr->pptable);
199
200 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
201
202 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
203 for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
204 voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
205 if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
206 break;
207 }
208
209 PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
210 "Can't find requested voltage id in vdd_dep_on_sclk table!",
211 return -EINVAL;
212 );
213
214 *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
215
216 return 0;
217}
218
/**
* Get Leakage VDDC based on leakage ID.
*
* For each possible leakage (virtual voltage) ID, locate the SCLK that
* uses it, query ATOM for the real EVV voltage at that clock, and
* record the (leakage ID -> actual voltage) pair in data->vddc_leakage.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint16_t vddc = 0;
	uint16_t evv_default = 1150;	/* 1.15V fallback, see below */
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;
	int result;

	for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
		if (!fiji_get_sclk_for_voltage_evv(hwmgr,
				table_info->vddc_lookup_table, vv_id, &sclk)) {
			/* With clock stretching enabled, bump the probe clock
			 * by 5 MHz (units presumably 10 kHz — confirm) past an
			 * entry that has stretching disabled at this clock. */
			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher)) {
				for (j = 1; j < sclk_table->count; j++) {
					if (sclk_table->entries[j].clk == sclk &&
							sclk_table->entries[j].cks_enable == 0) {
						sclk += 5000;
						break;
					}
				}
			}

			/* Prefer the driver-side EVV calculation; fall back to
			 * the VBIOS query if unavailable or failed. */
			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_EnableDriverEVV))
				result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
						VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
			else
				result = -EINVAL;

			if (result)
				result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
						VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc);

			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
			PP_ASSERT_WITH_CODE((vddc < 2000),
					"Invalid VDDC value, greater than 2v!", result = -EINVAL;);

			if (result)
				/* 1.15V is the default safe value for Fiji */
				vddc = evv_default;

			/* the voltage should not be zero nor equal to leakage ID */
			if (vddc != 0 && vddc != vv_id) {
				data->vddc_leakage.actual_voltage
						[data->vddc_leakage.count] = vddc;
				data->vddc_leakage.leakage_id
						[data->vddc_leakage.count] = vv_id;
				data->vddc_leakage.count++;
			}
		}
	}
	return 0;
}
285
286/**
287 * Change virtual leakage voltage to actual value.
288 *
289 * @param hwmgr the address of the powerplay hardware manager.
290 * @param pointer to changing voltage
291 * @param pointer to leakage table
292 */
293static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
294 uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
295{
296 uint32_t index;
297
298 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
299 for (index = 0; index < leakage_table->count; index++) {
300 /* if this voltage matches a leakage voltage ID */
301 /* patch with actual leakage voltage */
302 if (leakage_table->leakage_id[index] == *voltage) {
303 *voltage = leakage_table->actual_voltage[index];
304 break;
305 }
306 }
307
308 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
309 printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
310}
311
312/**
313* Patch voltage lookup table by EVV leakages.
314*
315* @param hwmgr the address of the powerplay hardware manager.
316* @param pointer to voltage lookup table
317* @param pointer to leakage table
318* @return always 0
319*/
320static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
321 phm_ppt_v1_voltage_lookup_table *lookup_table,
322 struct fiji_leakage_voltage *leakage_table)
323{
324 uint32_t i;
325
326 for (i = 0; i < lookup_table->count; i++)
327 fiji_patch_with_vdd_leakage(hwmgr,
328 &lookup_table->entries[i].us_vdd, leakage_table);
329
330 return 0;
331}
332
333static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
334 struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
335 uint16_t *vddc)
336{
337 struct phm_ppt_v1_information *table_info =
338 (struct phm_ppt_v1_information *)(hwmgr->pptable);
339 fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
340 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
341 table_info->max_clock_voltage_on_dc.vddc;
342 return 0;
343}
344
345static int fiji_patch_voltage_dependency_tables_with_lookup_table(
346 struct pp_hwmgr *hwmgr)
347{
348 uint8_t entryId;
349 uint8_t voltageId;
350 struct phm_ppt_v1_information *table_info =
351 (struct phm_ppt_v1_information *)(hwmgr->pptable);
352
353 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
354 table_info->vdd_dep_on_sclk;
355 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
356 table_info->vdd_dep_on_mclk;
357 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
358 table_info->mm_dep_table;
359
360 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
361 voltageId = sclk_table->entries[entryId].vddInd;
362 sclk_table->entries[entryId].vddc =
363 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
364 }
365
366 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
367 voltageId = mclk_table->entries[entryId].vddInd;
368 mclk_table->entries[entryId].vddc =
369 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
370 }
371
372 for (entryId = 0; entryId < mm_table->count; ++entryId) {
373 voltageId = mm_table->entries[entryId].vddcInd;
374 mm_table->entries[entryId].vddc =
375 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
376 }
377
378 return 0;
379
380}
381
/* Placeholder: calculated-voltage support for the dependency tables is
 * not implemented yet; intentionally a successful no-op. */
static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	return 0;
}
387
/* Placeholder: calculated voltage from the multimedia dependency table
 * is not implemented yet; intentionally a successful no-op. */
static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	return 0;
}
393
394static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
395 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
396{
397 uint32_t table_size, i, j;
398 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
399 table_size = lookup_table->count;
400
401 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
402 "Lookup table is empty", return -EINVAL);
403
404 /* Sorting voltages */
405 for (i = 0; i < table_size - 1; i++) {
406 for (j = i + 1; j > 0; j--) {
407 if (lookup_table->entries[j].us_vdd <
408 lookup_table->entries[j - 1].us_vdd) {
409 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
410 lookup_table->entries[j - 1] = lookup_table->entries[j];
411 lookup_table->entries[j] = tmp_voltage_lookup_record;
412 }
413 }
414 }
415
416 return 0;
417}
418
419static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
420{
421 int result = 0;
422 int tmp_result;
423 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
424 struct phm_ppt_v1_information *table_info =
425 (struct phm_ppt_v1_information *)(hwmgr->pptable);
426
427 tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
428 table_info->vddc_lookup_table, &(data->vddc_leakage));
429 if (tmp_result)
430 result = tmp_result;
431
432 tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
433 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
434 if (tmp_result)
435 result = tmp_result;
436
437 tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
438 if (tmp_result)
439 result = tmp_result;
440
441 tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
442 if (tmp_result)
443 result = tmp_result;
444
445 tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
446 if (tmp_result)
447 result = tmp_result;
448
449 tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
450 if(tmp_result)
451 result = tmp_result;
452
453 return result;
454}
455
456static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
457{
458 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
459 struct phm_ppt_v1_information *table_info =
460 (struct phm_ppt_v1_information *)(hwmgr->pptable);
461
462 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
463 table_info->vdd_dep_on_sclk;
464 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
465 table_info->vdd_dep_on_mclk;
466
467 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
468 "VDD dependency on SCLK table is missing. \
469 This table is mandatory", return -EINVAL);
470 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
471 "VDD dependency on SCLK table has to have is missing. \
472 This table is mandatory", return -EINVAL);
473
474 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
475 "VDD dependency on MCLK table is missing. \
476 This table is mandatory", return -EINVAL);
477 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
478 "VDD dependency on MCLK table has to have is missing. \
479 This table is mandatory", return -EINVAL);
480
481 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
482 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->
483 entries[allowed_sclk_vdd_table->count - 1].vddc;
484
485 table_info->max_clock_voltage_on_ac.sclk =
486 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
487 table_info->max_clock_voltage_on_ac.mclk =
488 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
489 table_info->max_clock_voltage_on_ac.vddc =
490 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
491 table_info->max_clock_voltage_on_ac.vddci =
492 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
493
494 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
495 table_info->max_clock_voltage_on_ac.sclk;
496 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
497 table_info->max_clock_voltage_on_ac.mclk;
498 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
499 table_info->max_clock_voltage_on_ac.vddc;
500 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
501 table_info->max_clock_voltage_on_ac.vddci;
502
503 return 0;
504}
505
506static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
507{
508 uint32_t speedCntl = 0;
509
510 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
511 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
512 ixPCIE_LC_SPEED_CNTL);
513 return((uint16_t)PHM_GET_FIELD(speedCntl,
514 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
515}
516
517static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
518{
519 uint32_t link_width;
520
521 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
522 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
523 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
524
525 PP_ASSERT_WITH_CODE((7 >= link_width),
526 "Invalid PCIe lane width!", return 0);
527
528 return decode_pcie_lane_width(link_width);
529}
530
/** Patch the Boot State to match VBIOS boot clocks and voltage.
*
* Reads the ATOM FirmwareInfo (V2_2) table for the boot SCLK/MCLK and
* boot VDDC/VDDCI/MVDD voltages, samples the current PCIe speed and
* lane count, caches everything in data->vbios_boot_state, and seeds
* performance level 0 of the given power state with those values.
*
* @param hwmgr Pointer to the hardware manager.
* @param hw_ps The address of the PowerState instance being created.
* @return always 0 (a missing firmware table is tolerated).
*/
static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. VBIOS values are little-endian. */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	data->vbios_boot_state.pcie_gen_bootup_value =
			fiji_get_current_pcie_speed(hwmgr);
	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)fiji_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
581
582static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
583{
584 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
585 uint32_t i;
586 struct phm_ppt_v1_information *table_info =
587 (struct phm_ppt_v1_information *)(hwmgr->pptable);
588 bool stay_in_boot;
589 int result;
590
591 data->dll_default_on = false;
592 data->sram_end = SMC_RAM_END;
593
594 for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
595 data->activity_target[i] = FIJI_AT_DFLT;
596
597 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
598
599 data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
600 data->mclk_dpm0_activity_target = 0xa;
601
602 data->sclk_dpm_key_disabled = 0;
603 data->mclk_dpm_key_disabled = 0;
604 data->pcie_dpm_key_disabled = 0;
605
606 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
607 PHM_PlatformCaps_UnTabledHardwareInterface);
608 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
609 PHM_PlatformCaps_TablelessHardwareInterface);
610
611 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
612 PHM_PlatformCaps_SclkDeepSleep);
613
614 data->gpio_debug = 0;
615
616 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
617 PHM_PlatformCaps_DynamicPatchPowerState);
618
619 /* need to set voltage control types before EVV patching */
620 data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
621 data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
622 data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
623
624 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
625 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
626 data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
627
628 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
629 PHM_PlatformCaps_EnableMVDDControl))
630 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
631 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
632 data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
633
634 if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
635 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
636 PHM_PlatformCaps_EnableMVDDControl);
637
638 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
639 PHM_PlatformCaps_ControlVDDCI)) {
640 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
641 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
642 data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
643 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
644 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
645 data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
646 }
647
648 if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
649 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
650 PHM_PlatformCaps_ControlVDDCI);
651
652 if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
653 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
654 PHM_PlatformCaps_ClockStretcher);
655
656 fiji_init_dpm_defaults(hwmgr);
657
658 /* Get leakage voltage based on leakage ID. */
659 fiji_get_evv_voltages(hwmgr);
660
661 /* Patch our voltage dependency table with actual leakage voltage
662 * We need to perform leakage translation before it's used by other functions
663 */
664 fiji_complete_dependency_tables(hwmgr);
665
666 /* Parse pptable data read from VBIOS */
667 fiji_set_private_data_based_on_pptable(hwmgr);
668
669 /* ULV Support */
670 data->ulv.ulv_supported = true; /* ULV feature is enabled by default */
671
672 /* Initalize Dynamic State Adjustment Rule Settings */
673 result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
674
675 if (!result) {
676 data->uvd_enabled = false;
677 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
678 PHM_PlatformCaps_EnableSMU7ThermalManagement);
679 data->vddc_phase_shed_control = false;
680 }
681
682 stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
683 PHM_PlatformCaps_StayInBootState);
684
685 if (0 == result) {
686 struct cgs_system_info sys_info = {0};
687
688 data->is_tlu_enabled = 0;
689 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
690 FIJI_MAX_HARDWARE_POWERLEVELS;
691 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
692 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
693
694 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
695 PHM_PlatformCaps_FanSpeedInTableIsRPM);
696
697 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
698 hwmgr->thermal_controller.
699 advanceFanControlParameters.ucFanControlMode) {
700 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
701 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
702 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
703 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
704 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
705 table_info->cac_dtp_table->usOperatingTempMinLimit;
706 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
707 table_info->cac_dtp_table->usOperatingTempMaxLimit;
708 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
709 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
710 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
711 table_info->cac_dtp_table->usOperatingTempStep;
712 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
713 table_info->cac_dtp_table->usTargetOperatingTemp;
714
715 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
716 PHM_PlatformCaps_ODFuzzyFanControlSupport);
717 }
718
719 sys_info.size = sizeof(struct cgs_system_info);
720 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
721 result = cgs_query_system_info(hwmgr->device, &sys_info);
722 if (result)
723 data->pcie_gen_cap = 0x30007;
724 else
725 data->pcie_gen_cap = (uint32_t)sys_info.value;
726 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
727 data->pcie_spc_cap = 20;
728 sys_info.size = sizeof(struct cgs_system_info);
729 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
730 result = cgs_query_system_info(hwmgr->device, &sys_info);
731 if (result)
732 data->pcie_lane_cap = 0x2f0000;
733 else
734 data->pcie_lane_cap = (uint32_t)sys_info.value;
735 } else {
736 /* Ignore return value in here, we are cleaning up a mess. */
737 tonga_hwmgr_backend_fini(hwmgr);
738 }
739
740 return 0;
741}
742
743/**
744 * Read clock related registers.
745 *
746 * @param hwmgr the address of the powerplay hardware manager.
747 * @return always 0
748 */
749static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
750{
751 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
752
753 data->clock_registers.vCG_SPLL_FUNC_CNTL =
754 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
755 ixCG_SPLL_FUNC_CNTL);
756 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
757 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
758 ixCG_SPLL_FUNC_CNTL_2);
759 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
760 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
761 ixCG_SPLL_FUNC_CNTL_3);
762 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
763 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
764 ixCG_SPLL_FUNC_CNTL_4);
765 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
766 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
767 ixCG_SPLL_SPREAD_SPECTRUM);
768 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
769 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
770 ixCG_SPLL_SPREAD_SPECTRUM_2);
771
772 return 0;
773}
774
775/**
776 * Find out if memory is GDDR5.
777 *
778 * @param hwmgr the address of the powerplay hardware manager.
779 * @return always 0
780 */
781static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
782{
783 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
784 uint32_t temp;
785
786 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
787
788 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
789 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
790 MC_SEQ_MISC0_GDDR5_SHIFT));
791
792 return 0;
793}
794
795/**
796 * Enables Dynamic Power Management by SMC
797 *
798 * @param hwmgr the address of the powerplay hardware manager.
799 * @return always 0
800 */
801static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
802{
803 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
804 GENERAL_PWRMGT, STATIC_PM_EN, 1);
805
806 return 0;
807}
808
809/**
810 * Initialize PowerGating States for different engines
811 *
812 * @param hwmgr the address of the powerplay hardware manager.
813 * @return always 0
814 */
815static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
816{
817 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
818
819 data->uvd_power_gated = false;
820 data->vce_power_gated = false;
821 data->samu_power_gated = false;
822 data->acp_power_gated = false;
823 data->pg_acp_init = true;
824
825 return 0;
826}
827
/* Reset the low-SCLK interrupt threshold to 0 (disabled) at init. */
static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
835
/* One-time ASIC setup sequence run before DPM is enabled.
 *
 * Each step is attempted even if an earlier one failed:
 * PP_ASSERT_WITH_CODE only logs the message and records the failing
 * step's code into 'result', so the returned value is the LAST failure
 * (or 0 if everything succeeded).
 */
static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = fiji_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = fiji_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = fiji_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = fiji_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	/* NOTE(review): reuses the Tonga helper for the MC microcode
	 * version — presumably shared between the two ASICs; confirm. */
	tmp_result = tonga_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = fiji_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
866
867/**
868* Checks if we want to support voltage control
869*
870* @param hwmgr the address of the powerplay hardware manager.
871*/
872static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
873{
874 const struct fiji_hwmgr *data =
875 (const struct fiji_hwmgr *)(hwmgr->backend);
876
877 return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
878}
879
880/**
881* Enable voltage control
882*
883* @param hwmgr the address of the powerplay hardware manager.
884* @return always 0
885*/
886static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
887{
888 /* enable voltage control */
889 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
890 GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
891
892 return 0;
893}
894
895/**
896* Remove repeated voltage values and create table with unique values.
897*
898* @param hwmgr the address of the powerplay hardware manager.
899* @param vol_table the pointer to changing voltage table
900* @return 0 in success
901*/
902
903static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
904 struct pp_atomctrl_voltage_table *vol_table)
905{
906 uint32_t i, j;
907 uint16_t vvalue;
908 bool found = false;
909 struct pp_atomctrl_voltage_table *table;
910
911 PP_ASSERT_WITH_CODE((NULL != vol_table),
912 "Voltage Table empty.", return -EINVAL);
913 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
914 GFP_KERNEL);
915
916 if (NULL == table)
917 return -ENOMEM;
918
919 table->mask_low = vol_table->mask_low;
920 table->phase_delay = vol_table->phase_delay;
921
922 for (i = 0; i < vol_table->count; i++) {
923 vvalue = vol_table->entries[i].value;
924 found = false;
925
926 for (j = 0; j < table->count; j++) {
927 if (vvalue == table->entries[j].value) {
928 found = true;
929 break;
930 }
931 }
932
933 if (!found) {
934 table->entries[table->count].value = vvalue;
935 table->entries[table->count].smio_low =
936 vol_table->entries[i].smio_low;
937 table->count++;
938 }
939 }
940
941 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
942 kfree(table);
943
944 return 0;
945}
946
947static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
948 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
949{
950 uint32_t i;
951 int result;
952 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
953 struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
954
955 PP_ASSERT_WITH_CODE((0 != dep_table->count),
956 "Voltage Dependency Table empty.", return -EINVAL);
957
958 vol_table->mask_low = 0;
959 vol_table->phase_delay = 0;
960 vol_table->count = dep_table->count;
961
962 for (i = 0; i < dep_table->count; i++) {
963 vol_table->entries[i].value = dep_table->entries[i].mvdd;
964 vol_table->entries[i].smio_low = 0;
965 }
966
967 result = fiji_trim_voltage_table(hwmgr, vol_table);
968 PP_ASSERT_WITH_CODE((0 == result),
969 "Failed to trim MVDD table.", return result);
970
971 return 0;
972}
973
974static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
975 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
976{
977 uint32_t i;
978 int result;
979 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
980 struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
981
982 PP_ASSERT_WITH_CODE((0 != dep_table->count),
983 "Voltage Dependency Table empty.", return -EINVAL);
984
985 vol_table->mask_low = 0;
986 vol_table->phase_delay = 0;
987 vol_table->count = dep_table->count;
988
989 for (i = 0; i < dep_table->count; i++) {
990 vol_table->entries[i].value = dep_table->entries[i].vddci;
991 vol_table->entries[i].smio_low = 0;
992 }
993
994 result = fiji_trim_voltage_table(hwmgr, vol_table);
995 PP_ASSERT_WITH_CODE((0 == result),
996 "Failed to trim VDDCI table.", return result);
997
998 return 0;
999}
1000
1001static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1002 phm_ppt_v1_voltage_lookup_table *lookup_table)
1003{
1004 int i = 0;
1005 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1006 struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
1007
1008 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
1009 "Voltage Lookup Table empty.", return -EINVAL);
1010
1011 vol_table->mask_low = 0;
1012 vol_table->phase_delay = 0;
1013
1014 vol_table->count = lookup_table->count;
1015
1016 for (i = 0; i < vol_table->count; i++) {
1017 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
1018 vol_table->entries[i].smio_low = 0;
1019 }
1020
1021 return 0;
1022}
1023
1024/* ---- Voltage Tables ----
1025 * If the voltage table would be bigger than
1026 * what will fit into the state table on
1027 * the SMC keep only the higher entries.
1028 */
1029static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
1030 uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
1031{
1032 unsigned int i, diff;
1033
1034 if (vol_table->count <= max_vol_steps)
1035 return;
1036
1037 diff = vol_table->count - max_vol_steps;
1038
1039 for (i = 0; i < max_vol_steps; i++)
1040 vol_table->entries[i] = vol_table->entries[i + diff];
1041
1042 vol_table->count = max_vol_steps;
1043
1044 return;
1045}
1046
1047/**
1048* Create Voltage Tables.
1049*
1050* @param hwmgr the address of the powerplay hardware manager.
1051* @return always 0
1052*/
1053static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1054{
1055 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1056 struct phm_ppt_v1_information *table_info =
1057 (struct phm_ppt_v1_information *)hwmgr->pptable;
1058 int result;
1059
1060 if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1061 result = atomctrl_get_voltage_table_v3(hwmgr,
1062 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
1063 &(data->mvdd_voltage_table));
1064 PP_ASSERT_WITH_CODE((0 == result),
1065 "Failed to retrieve MVDD table.",
1066 return result);
1067 } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1068 result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
1069 table_info->vdd_dep_on_mclk);
1070 PP_ASSERT_WITH_CODE((0 == result),
1071 "Failed to retrieve SVI2 MVDD table from dependancy table.",
1072 return result;);
1073 }
1074
1075 if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1076 result = atomctrl_get_voltage_table_v3(hwmgr,
1077 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
1078 &(data->vddci_voltage_table));
1079 PP_ASSERT_WITH_CODE((0 == result),
1080 "Failed to retrieve VDDCI table.",
1081 return result);
1082 } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1083 result = fiji_get_svi2_vddci_voltage_table(hwmgr,
1084 table_info->vdd_dep_on_mclk);
1085 PP_ASSERT_WITH_CODE((0 == result),
1086 "Failed to retrieve SVI2 VDDCI table from dependancy table.",
1087 return result);
1088 }
1089
1090 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1091 result = fiji_get_svi2_vdd_voltage_table(hwmgr,
1092 table_info->vddc_lookup_table);
1093 PP_ASSERT_WITH_CODE((0 == result),
1094 "Failed to retrieve SVI2 VDDC table from lookup table.",
1095 return result);
1096 }
1097
1098 PP_ASSERT_WITH_CODE(
1099 (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
1100 "Too many voltage values for VDDC. Trimming to fit state table.",
1101 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1102 SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
1103
1104 PP_ASSERT_WITH_CODE(
1105 (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
1106 "Too many voltage values for VDDCI. Trimming to fit state table.",
1107 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1108 SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
1109
1110 PP_ASSERT_WITH_CODE(
1111 (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
1112 "Too many voltage values for MVDD. Trimming to fit state table.",
1113 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1114 SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
1115
1116 return 0;
1117}
1118
/* Mirror selected MC sequencer registers into their LP (low power)
 * shadow copies; the VBIOS no longer programs these. */
static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	/* Program additional LP registers
	 * that are no longer programmed by VBIOS
	 */
	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));

	return 0;
}
1141
1142/**
1143* Programs static screed detection parameters
1144*
1145* @param hwmgr the address of the powerplay hardware manager.
1146* @return always 0
1147*/
1148static int fiji_program_static_screen_threshold_parameters(
1149 struct pp_hwmgr *hwmgr)
1150{
1151 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1152
1153 /* Set static screen threshold unit */
1154 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1155 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
1156 data->static_screen_threshold_unit);
1157 /* Set static screen threshold */
1158 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1159 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
1160 data->static_screen_threshold);
1161
1162 return 0;
1163}
1164
1165/**
1166* Setup display gap for glitch free memory clock switching.
1167*
1168* @param hwmgr the address of the powerplay hardware manager.
1169* @return always 0
1170*/
1171static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
1172{
1173 uint32_t displayGap =
1174 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1175 ixCG_DISPLAY_GAP_CNTL);
1176
1177 displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
1178 DISP_GAP, DISPLAY_GAP_IGNORE);
1179
1180 displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
1181 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
1182
1183 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1184 ixCG_DISPLAY_GAP_CNTL, displayGap);
1185
1186 return 0;
1187}
1188
1189/**
1190* Programs activity state transition voting clients
1191*
1192* @param hwmgr the address of the powerplay hardware manager.
1193* @return always 0
1194*/
1195static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
1196{
1197 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1198
1199 /* Clear reset for voting clients before enabling DPM */
1200 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1201 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
1202 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1203 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
1204
1205 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1206 ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
1207 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1208 ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
1209 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1210 ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
1211 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1212 ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
1213 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1214 ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
1215 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1216 ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
1217 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1218 ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
1219 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1220 ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
1221
1222 return 0;
1223}
1224
1225/**
1226* Get the location of various tables inside the FW image.
1227*
1228* @param hwmgr the address of the powerplay hardware manager.
1229* @return always 0
1230*/
1231static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
1232{
1233 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1234 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
1235 uint32_t tmp;
1236 int result;
1237 bool error = false;
1238
1239 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1240 SMU7_FIRMWARE_HEADER_LOCATION +
1241 offsetof(SMU73_Firmware_Header, DpmTable),
1242 &tmp, data->sram_end);
1243
1244 if (0 == result)
1245 data->dpm_table_start = tmp;
1246
1247 error |= (0 != result);
1248
1249 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1250 SMU7_FIRMWARE_HEADER_LOCATION +
1251 offsetof(SMU73_Firmware_Header, SoftRegisters),
1252 &tmp, data->sram_end);
1253
1254 if (!result) {
1255 data->soft_regs_start = tmp;
1256 smu_data->soft_regs_start = tmp;
1257 }
1258
1259 error |= (0 != result);
1260
1261 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1262 SMU7_FIRMWARE_HEADER_LOCATION +
1263 offsetof(SMU73_Firmware_Header, mcRegisterTable),
1264 &tmp, data->sram_end);
1265
1266 if (!result)
1267 data->mc_reg_table_start = tmp;
1268
1269 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1270 SMU7_FIRMWARE_HEADER_LOCATION +
1271 offsetof(SMU73_Firmware_Header, FanTable),
1272 &tmp, data->sram_end);
1273
1274 if (!result)
1275 data->fan_table_start = tmp;
1276
1277 error |= (0 != result);
1278
1279 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1280 SMU7_FIRMWARE_HEADER_LOCATION +
1281 offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
1282 &tmp, data->sram_end);
1283
1284 if (!result)
1285 data->arb_table_start = tmp;
1286
1287 error |= (0 != result);
1288
1289 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1290 SMU7_FIRMWARE_HEADER_LOCATION +
1291 offsetof(SMU73_Firmware_Header, Version),
1292 &tmp, data->sram_end);
1293
1294 if (!result)
1295 hwmgr->microcode_version_info.SMC = tmp;
1296
1297 error |= (0 != result);
1298
1299 return error ? -1 : 0;
1300}
1301
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 *
 * Reads the DRAM timing registers and burst time of the source set,
 * writes them into the destination set, then requests the MC arbiter
 * to switch to the destination. Returns -EINVAL for any set other
 * than F0/F1.
 */
static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Capture the source set's timing state. */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Replay it into the destination set. */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	/* Enable clock-gating config bits and request the arbiter switch. */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
1350
1351/**
1352* Initial switch from ARB F0->F1
1353*
1354* @param hwmgr the address of the powerplay hardware manager.
1355* @return always 0
1356* This function is to be called from the SetPowerState table.
1357*/
1358static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
1359{
1360 return fiji_copy_and_switch_arb_sets(hwmgr,
1361 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1362}
1363
1364static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
1365 struct fiji_single_dpm_table *dpm_table, uint32_t count)
1366{
1367 int i;
1368 PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
1369 "Fatal error, can not set up single DPM table entries "
1370 "to exceed max number!",);
1371
1372 dpm_table->count = count;
1373 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
1374 dpm_table->dpm_levels[i].enabled = false;
1375
1376 return 0;
1377}
1378
1379static void fiji_setup_pcie_table_entry(
1380 struct fiji_single_dpm_table *dpm_table,
1381 uint32_t index, uint32_t pcie_gen,
1382 uint32_t pcie_lanes)
1383{
1384 dpm_table->dpm_levels[index].value = pcie_gen;
1385 dpm_table->dpm_levels[index].param1 = pcie_lanes;
1386 dpm_table->dpm_levels[index].enabled = 1;
1387}
1388
1389static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1390{
1391 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1392 struct phm_ppt_v1_information *table_info =
1393 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1394 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
1395 uint32_t i, max_entry;
1396
1397 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
1398 data->use_pcie_power_saving_levels), "No pcie performance levels!",
1399 return -EINVAL);
1400
1401 if (data->use_pcie_performance_levels &&
1402 !data->use_pcie_power_saving_levels) {
1403 data->pcie_gen_power_saving = data->pcie_gen_performance;
1404 data->pcie_lane_power_saving = data->pcie_lane_performance;
1405 } else if (!data->use_pcie_performance_levels &&
1406 data->use_pcie_power_saving_levels) {
1407 data->pcie_gen_performance = data->pcie_gen_power_saving;
1408 data->pcie_lane_performance = data->pcie_lane_power_saving;
1409 }
1410
1411 fiji_reset_single_dpm_table(hwmgr,
1412 &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);
1413
1414 if (pcie_table != NULL) {
1415 /* max_entry is used to make sure we reserve one PCIE level
1416 * for boot level (fix for A+A PSPP issue).
1417 * If PCIE table from PPTable have ULV entry + 8 entries,
1418 * then ignore the last entry.*/
1419 max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
1420 SMU73_MAX_LEVELS_LINK : pcie_table->count;
1421 for (i = 1; i < max_entry; i++) {
1422 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
1423 get_pcie_gen_support(data->pcie_gen_cap,
1424 pcie_table->entries[i].gen_speed),
1425 get_pcie_lane_support(data->pcie_lane_cap,
1426 pcie_table->entries[i].lane_width));
1427 }
1428 data->dpm_table.pcie_speed_table.count = max_entry - 1;
1429 } else {
1430 /* Hardcode Pcie Table */
1431 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
1432 get_pcie_gen_support(data->pcie_gen_cap,
1433 PP_Min_PCIEGen),
1434 get_pcie_lane_support(data->pcie_lane_cap,
1435 PP_Max_PCIELane));
1436 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
1437 get_pcie_gen_support(data->pcie_gen_cap,
1438 PP_Min_PCIEGen),
1439 get_pcie_lane_support(data->pcie_lane_cap,
1440 PP_Max_PCIELane));
1441 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
1442 get_pcie_gen_support(data->pcie_gen_cap,
1443 PP_Max_PCIEGen),
1444 get_pcie_lane_support(data->pcie_lane_cap,
1445 PP_Max_PCIELane));
1446 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
1447 get_pcie_gen_support(data->pcie_gen_cap,
1448 PP_Max_PCIEGen),
1449 get_pcie_lane_support(data->pcie_lane_cap,
1450 PP_Max_PCIELane));
1451 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
1452 get_pcie_gen_support(data->pcie_gen_cap,
1453 PP_Max_PCIEGen),
1454 get_pcie_lane_support(data->pcie_lane_cap,
1455 PP_Max_PCIELane));
1456 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
1457 get_pcie_gen_support(data->pcie_gen_cap,
1458 PP_Max_PCIEGen),
1459 get_pcie_lane_support(data->pcie_lane_cap,
1460 PP_Max_PCIELane));
1461
1462 data->dpm_table.pcie_speed_table.count = 6;
1463 }
1464 /* Populate last level for boot PCIE level, but do not increment count. */
1465 fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
1466 data->dpm_table.pcie_speed_table.count,
1467 get_pcie_gen_support(data->pcie_gen_cap,
1468 PP_Min_PCIEGen),
1469 get_pcie_lane_support(data->pcie_lane_cap,
1470 PP_Max_PCIELane));
1471
1472 return 0;
1473}
1474
1475/*
1476 * This function is to initalize all DPM state tables
1477 * for SMU7 based on the dependency table.
1478 * Dynamic state patching function will then trim these
1479 * state tables to the allowed range based
1480 * on the power policy or external client requests,
1481 * such as UVD request, etc.
1482 */
1483static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1484{
1485 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1486 struct phm_ppt_v1_information *table_info =
1487 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1488 uint32_t i;
1489
1490 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
1491 table_info->vdd_dep_on_sclk;
1492 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1493 table_info->vdd_dep_on_mclk;
1494
1495 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
1496 "SCLK dependency table is missing. This table is mandatory",
1497 return -EINVAL);
1498 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
1499 "SCLK dependency table has to have is missing. "
1500 "This table is mandatory",
1501 return -EINVAL);
1502
1503 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
1504 "MCLK dependency table is missing. This table is mandatory",
1505 return -EINVAL);
1506 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1507 "MCLK dependency table has to have is missing. "
1508 "This table is mandatory",
1509 return -EINVAL);
1510
1511 /* clear the state table to reset everything to default */
1512 fiji_reset_single_dpm_table(hwmgr,
1513 &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
1514 fiji_reset_single_dpm_table(hwmgr,
1515 &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
1516
1517 /* Initialize Sclk DPM table based on allow Sclk values */
1518 data->dpm_table.sclk_table.count = 0;
1519 for (i = 0; i < dep_sclk_table->count; i++) {
1520 if (i == 0 || data->dpm_table.sclk_table.dpm_levels
1521 [data->dpm_table.sclk_table.count - 1].value !=
1522 dep_sclk_table->entries[i].clk) {
1523 data->dpm_table.sclk_table.dpm_levels
1524 [data->dpm_table.sclk_table.count].value =
1525 dep_sclk_table->entries[i].clk;
1526 data->dpm_table.sclk_table.dpm_levels
1527 [data->dpm_table.sclk_table.count].enabled =
1528 (i == 0) ? true : false;
1529 data->dpm_table.sclk_table.count++;
1530 }
1531 }
1532
1533 /* Initialize Mclk DPM table based on allow Mclk values */
1534 data->dpm_table.mclk_table.count = 0;
1535 for (i=0; i<dep_mclk_table->count; i++) {
1536 if ( i==0 || data->dpm_table.mclk_table.dpm_levels
1537 [data->dpm_table.mclk_table.count - 1].value !=
1538 dep_mclk_table->entries[i].clk) {
1539 data->dpm_table.mclk_table.dpm_levels
1540 [data->dpm_table.mclk_table.count].value =
1541 dep_mclk_table->entries[i].clk;
1542 data->dpm_table.mclk_table.dpm_levels
1543 [data->dpm_table.mclk_table.count].enabled =
1544 (i == 0) ? true : false;
1545 data->dpm_table.mclk_table.count++;
1546 }
1547 }
1548
1549 /* setup PCIE gen speed levels */
1550 fiji_setup_default_pcie_table(hwmgr);
1551
1552 /* save a copy of the default DPM table */
1553 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1554 sizeof(struct fiji_dpm_table));
1555
1556 return 0;
1557}
1558
1559/**
1560 * @brief PhwFiji_GetVoltageOrder
1561 * Returns index of requested voltage record in lookup(table)
1562 * @param lookup_table - lookup list to search in
1563 * @param voltage - voltage to look for
1564 * @return 0 on success
1565 */
1566uint8_t fiji_get_voltage_index(
1567 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
1568{
1569 uint8_t count = (uint8_t) (lookup_table->count);
1570 uint8_t i;
1571
1572 PP_ASSERT_WITH_CODE((NULL != lookup_table),
1573 "Lookup Table empty.", return 0);
1574 PP_ASSERT_WITH_CODE((0 != count),
1575 "Lookup Table empty.", return 0);
1576
1577 for (i = 0; i < lookup_table->count; i++) {
1578 /* find first voltage equal or bigger than requested */
1579 if (lookup_table->entries[i].us_vdd >= voltage)
1580 return i;
1581 }
1582 /* voltage is bigger than max voltage in the table */
1583 return i - 1;
1584}
1585
1586/**
1587* Preparation of vddc and vddgfx CAC tables for SMC.
1588*
1589* @param hwmgr the address of the hardware manager
1590* @param table the SMC DPM table structure to be populated
1591* @return always 0
1592*/
1593static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
1594 struct SMU73_Discrete_DpmTable *table)
1595{
1596 uint32_t count;
1597 uint8_t index;
1598 int result = 0;
1599 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1600 struct phm_ppt_v1_information *table_info =
1601 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1602 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
1603 table_info->vddc_lookup_table;
1604 /* tables is already swapped, so in order to use the value from it,
1605 * we need to swap it back.
1606 * We are populating vddc CAC data to BapmVddc table
1607 * in split and merged mode
1608 */
1609 for( count = 0; count<lookup_table->count; count++) {
1610 index = fiji_get_voltage_index(lookup_table,
1611 data->vddc_voltage_table.entries[count].value);
1612 table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
1613 (lookup_table->entries[index].us_cac_low *
1614 VOLTAGE_SCALE)) / 25);
1615 table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
1616 (lookup_table->entries[index].us_cac_high *
1617 VOLTAGE_SCALE)) / 25);
1618 }
1619
1620 return result;
1621}
1622
1623/**
1624* Preparation of voltage tables for SMC.
1625*
1626* @param hwmgr the address of the hardware manager
1627* @param table the SMC DPM table structure to be populated
1628* @return always 0
1629*/
1630
1631int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
1632 struct SMU73_Discrete_DpmTable *table)
1633{
1634 int result;
1635
1636 result = fiji_populate_cac_table(hwmgr, table);
1637 PP_ASSERT_WITH_CODE(0 == result,
1638 "can not populate CAC voltage tables to SMC",
1639 return -EINVAL);
1640
1641 return 0;
1642}
1643
1644static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
1645 struct SMU73_Discrete_Ulv *state)
1646{
1647 int result = 0;
1648 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1649 struct phm_ppt_v1_information *table_info =
1650 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1651
1652 state->CcPwrDynRm = 0;
1653 state->CcPwrDynRm1 = 0;
1654
1655 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
1656 state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset *
1657 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 );
1658
1659 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
1660
1661 if (!result) {
1662 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
1663 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
1664 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
1665 }
1666 return result;
1667}
1668
/* Populate the DPM table's single ULV state via fiji_populate_ulv_level. */
static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	return fiji_populate_ulv_level(hwmgr, &table->Ulv);
}
1674
1675static int32_t fiji_get_dpm_level_enable_mask_value(
1676 struct fiji_single_dpm_table* dpm_table)
1677{
1678 int32_t i;
1679 int32_t mask = 0;
1680
1681 for (i = dpm_table->count; i > 0; i--) {
1682 mask = mask << 1;
1683 if (dpm_table->dpm_levels[i - 1].enabled)
1684 mask |= 0x1;
1685 else
1686 mask &= 0xFFFFFFFE;
1687 }
1688 return mask;
1689}
1690
/* Copy the PCIe speed DPM table into the SMC link-level table and
 * record the level count and enable mask in the backend. */
static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_dpm_table *dpm_table = &data->dpm_table;
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
				dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		/* Symbols-per-clock capability, low byte only. */
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		/* Thresholds are sent to the SMC in its byte order. */
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	data->smc_state_table.LinkLevelCount =
			(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
1718
1719/**
1720* Calculates the SCLK dividers using the provided engine clock
1721*
1722* @param hwmgr the address of the hardware manager
1723* @param clock the engine clock to use to populate the structure
1724* @param sclk the SMC SCLK structure to be populated
1725*/
1726static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
1727 uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
1728{
1729 const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1730 struct pp_atomctrl_clock_dividers_vi dividers;
1731 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1732 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1733 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1734 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1735 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1736 uint32_t ref_clock;
1737 uint32_t ref_divider;
1738 uint32_t fbdiv;
1739 int result;
1740
1741 /* get the engine clock dividers for this clock value */
1742 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
1743
1744 PP_ASSERT_WITH_CODE(result == 0,
1745 "Error retrieving Engine Clock dividers from VBIOS.",
1746 return result);
1747
1748 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
1749 ref_clock = atomctrl_get_reference_clock(hwmgr);
1750 ref_divider = 1 + dividers.uc_pll_ref_div;
1751
1752 /* low 14 bits is fraction and high 12 bits is divider */
1753 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
1754
1755 /* SPLL_FUNC_CNTL setup */
1756 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1757 SPLL_REF_DIV, dividers.uc_pll_ref_div);
1758 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1759 SPLL_PDIV_A, dividers.uc_pll_post_div);
1760
1761 /* SPLL_FUNC_CNTL_3 setup*/
1762 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
1763 SPLL_FB_DIV, fbdiv);
1764
1765 /* set to use fractional accumulation*/
1766 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
1767 SPLL_DITHEN, 1);
1768
1769 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1770 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
1771 struct pp_atomctrl_internal_ss_info ssInfo;
1772
1773 uint32_t vco_freq = clock * dividers.uc_pll_post_div;
1774 if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
1775 vco_freq, &ssInfo)) {
1776 /*
1777 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
1778 * ss_info.speed_spectrum_rate -- in unit of khz
1779 *
1780 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
1781 */
1782 uint32_t clk_s = ref_clock * 5 /
1783 (ref_divider * ssInfo.speed_spectrum_rate);
1784 /* clkv = 2 * D * fbdiv / NS */
1785 uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
1786 fbdiv / (clk_s * 10000);
1787
1788 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
1789 CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
1790 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
1791 CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
1792 cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
1793 CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
1794 }
1795 }
1796
1797 sclk->SclkFrequency = clock;
1798 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
1799 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
1800 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
1801 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
1802 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
1803
1804 return 0;
1805}
1806
1807static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
1808{
1809 uint32_t i;
1810 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1811 struct pp_atomctrl_voltage_table *vddci_table =
1812 &(data->vddci_voltage_table);
1813
1814 for (i = 0; i < vddci_table->count; i++) {
1815 if (vddci_table->entries[i].value >= vddci)
1816 return vddci_table->entries[i].value;
1817 }
1818
1819 PP_ASSERT_WITH_CODE(false,
1820 "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
1821 return vddci_table->entries[i].value);
1822}
1823
1824static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
1825 struct phm_ppt_v1_clock_voltage_dependency_table* dep_table,
1826 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1827{
1828 uint32_t i;
1829 uint16_t vddci;
1830 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1831
1832 *voltage = *mvdd = 0;
1833
1834 /* clock - voltage dependency table is empty table */
1835 if (dep_table->count == 0)
1836 return -EINVAL;
1837
1838 for (i = 0; i < dep_table->count; i++) {
1839 /* find first sclk bigger than request */
1840 if (dep_table->entries[i].clk >= clock) {
1841 *voltage |= (dep_table->entries[i].vddc *
1842 VOLTAGE_SCALE) << VDDC_SHIFT;
1843 if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
1844 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1845 VOLTAGE_SCALE) << VDDCI_SHIFT;
1846 else if (dep_table->entries[i].vddci)
1847 *voltage |= (dep_table->entries[i].vddci *
1848 VOLTAGE_SCALE) << VDDCI_SHIFT;
1849 else {
1850 vddci = fiji_find_closest_vddci(hwmgr,
1851 (dep_table->entries[i].vddc -
1852 (uint16_t)data->vddc_vddci_delta));
1853 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1854 }
1855
1856 if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1857 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
1858 VOLTAGE_SCALE;
1859 else if (dep_table->entries[i].mvdd)
1860 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
1861 VOLTAGE_SCALE;
1862
1863 *voltage |= 1 << PHASES_SHIFT;
1864 return 0;
1865 }
1866 }
1867
1868 /* sclk is bigger than max sclk in the dependence table */
1869 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1870
1871 if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
1872 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1873 VOLTAGE_SCALE) << VDDCI_SHIFT;
1874 else if (dep_table->entries[i-1].vddci) {
1875 vddci = fiji_find_closest_vddci(hwmgr,
1876 (dep_table->entries[i].vddc -
1877 (uint16_t)data->vddc_vddci_delta));
1878 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1879 }
1880
1881 if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1882 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
1883 else if (dep_table->entries[i].mvdd)
1884 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
1885
1886 return 0;
1887}
1888/**
1889* Populates single SMC SCLK structure using the provided engine clock
1890*
1891* @param hwmgr the address of the hardware manager
1892* @param clock the engine clock to use to populate the structure
1893* @param sclk the SMC SCLK structure to be populated
1894*/
1895
1896static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
1897 uint32_t clock, uint16_t sclk_al_threshold,
1898 struct SMU73_Discrete_GraphicsLevel *level)
1899{
1900 int result;
1901 /* PP_Clocks minClocks; */
1902 uint32_t threshold, mvdd;
1903 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1904 struct phm_ppt_v1_information *table_info =
1905 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1906
1907 result = fiji_calculate_sclk_params(hwmgr, clock, level);
1908
1909 /* populate graphics levels */
1910 result = fiji_get_dependency_volt_by_clk(hwmgr,
1911 table_info->vdd_dep_on_sclk, clock,
1912 &level->MinVoltage, &mvdd);
1913 PP_ASSERT_WITH_CODE((0 == result),
1914 "can not find VDDC voltage value for "
1915 "VDDC engine clock dependency table",
1916 return result);
1917
1918 level->SclkFrequency = clock;
1919 level->ActivityLevel = sclk_al_threshold;
1920 level->CcPwrDynRm = 0;
1921 level->CcPwrDynRm1 = 0;
1922 level->EnabledForActivity = 0;
1923 level->EnabledForThrottle = 1;
1924 level->UpHyst = 10;
1925 level->DownHyst = 0;
1926 level->VoltageDownHyst = 0;
1927 level->PowerThrottle = 0;
1928
1929 threshold = clock * data->fast_watermark_threshold / 100;
1930
1931 /*
1932 * TODO: get minimum clocks from dal configaration
1933 * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
1934 */
1935 /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
1936
1937 /* get level->DeepSleepDivId
1938 if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
1939 {
1940 level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
1941 } */
1942
1943 /* Default to slow, highest DPM level will be
1944 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
1945 */
1946 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1947
1948 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
1949 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
1950 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
1951 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
1952 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
1953 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
1954 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
1955 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
1956 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
1957
1958 return 0;
1959}
1960/**
1961* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1962*
1963* @param hwmgr the address of the hardware manager
1964*/
1965static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1966{
1967 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1968 struct fiji_dpm_table *dpm_table = &data->dpm_table;
1969 struct phm_ppt_v1_information *table_info =
1970 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1971 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
1972 uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
1973 int result = 0;
1974 uint32_t array = data->dpm_table_start +
1975 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
1976 uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
1977 SMU73_MAX_LEVELS_GRAPHICS;
1978 struct SMU73_Discrete_GraphicsLevel *levels =
1979 data->smc_state_table.GraphicsLevel;
1980 uint32_t i, max_entry;
1981 uint8_t hightest_pcie_level_enabled = 0,
1982 lowest_pcie_level_enabled = 0,
1983 mid_pcie_level_enabled = 0,
1984 count = 0;
1985
1986 for (i = 0; i < dpm_table->sclk_table.count; i++) {
1987 result = fiji_populate_single_graphic_level(hwmgr,
1988 dpm_table->sclk_table.dpm_levels[i].value,
1989 (uint16_t)data->activity_target[i],
1990 &levels[i]);
1991 if (result)
1992 return result;
1993
1994 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
1995 if (i > 1)
1996 levels[i].DeepSleepDivId = 0;
1997 }
1998
1999 /* Only enable level 0 for now.*/
2000 levels[0].EnabledForActivity = 1;
2001
2002 /* set highest level watermark to high */
2003 levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
2004 PPSMC_DISPLAY_WATERMARK_HIGH;
2005
2006 data->smc_state_table.GraphicsDpmLevelCount =
2007 (uint8_t)dpm_table->sclk_table.count;
2008 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
2009 fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2010
2011 if (pcie_table != NULL) {
2012 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
2013 "There must be 1 or more PCIE levels defined in PPTable.",
2014 return -EINVAL);
2015 max_entry = pcie_entry_cnt - 1;
2016 for (i = 0; i < dpm_table->sclk_table.count; i++)
2017 levels[i].pcieDpmLevel =
2018 (uint8_t) ((i < max_entry)? i : max_entry);
2019 } else {
2020 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2021 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2022 (1 << (hightest_pcie_level_enabled + 1))) != 0 ))
2023 hightest_pcie_level_enabled++;
2024
2025 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2026 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2027 (1 << lowest_pcie_level_enabled)) == 0 ))
2028 lowest_pcie_level_enabled++;
2029
2030 while ((count < hightest_pcie_level_enabled) &&
2031 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2032 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0 ))
2033 count++;
2034
2035 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1+ count) <
2036 hightest_pcie_level_enabled?
2037 (lowest_pcie_level_enabled + 1 + count) :
2038 hightest_pcie_level_enabled;
2039
2040 /* set pcieDpmLevel to hightest_pcie_level_enabled */
2041 for(i = 2; i < dpm_table->sclk_table.count; i++)
2042 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
2043
2044 /* set pcieDpmLevel to lowest_pcie_level_enabled */
2045 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
2046
2047 /* set pcieDpmLevel to mid_pcie_level_enabled */
2048 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
2049 }
2050 /* level count will send to smc once at init smc table and never change */
2051 result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
2052 (uint32_t)array_size, data->sram_end);
2053
2054 return result;
2055}
2056
2057/**
2058 * MCLK Frequency Ratio
2059 * SEQ_CG_RESP Bit[31:24] - 0x0
2060 * Bit[27:24] \96 DDR3 Frequency ratio
2061 * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
2062 * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
2063 * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
2064 * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
2065 * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
2066 * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
2067 * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
2068 * 400 < 0x7 <= 450MHz, 800 < 0xF
2069 */
2070static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
2071{
2072 if (mem_clock <= 10000) return 0x0;
2073 if (mem_clock <= 15000) return 0x1;
2074 if (mem_clock <= 20000) return 0x2;
2075 if (mem_clock <= 25000) return 0x3;
2076 if (mem_clock <= 30000) return 0x4;
2077 if (mem_clock <= 35000) return 0x5;
2078 if (mem_clock <= 40000) return 0x6;
2079 if (mem_clock <= 45000) return 0x7;
2080 if (mem_clock <= 50000) return 0x8;
2081 if (mem_clock <= 55000) return 0x9;
2082 if (mem_clock <= 60000) return 0xa;
2083 if (mem_clock <= 65000) return 0xb;
2084 if (mem_clock <= 70000) return 0xc;
2085 if (mem_clock <= 75000) return 0xd;
2086 if (mem_clock <= 80000) return 0xe;
2087 /* mem_clock > 800MHz */
2088 return 0xf;
2089}
2090
2091/**
2092* Populates the SMC MCLK structure using the provided memory clock
2093*
2094* @param hwmgr the address of the hardware manager
2095* @param clock the memory clock to use to populate the structure
2096* @param sclk the SMC SCLK structure to be populated
2097*/
2098static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
2099 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
2100{
2101 struct pp_atomctrl_memory_clock_param mem_param;
2102 int result;
2103
2104 result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
2105 PP_ASSERT_WITH_CODE((0 == result),
2106 "Failed to get Memory PLL Dividers.",);
2107
2108 /* Save the result data to outpupt memory level structure */
2109 mclk->MclkFrequency = clock;
2110 mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
2111 mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
2112
2113 return result;
2114}
2115
/* Fill one SMC memory level for @clock: voltage lookup, hysteresis defaults,
 * stutter-mode gating, then the MCLK divider parameters.  Fields the SMC
 * reads are converted to SMC byte order at the end.
 */
static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;

	if (table_info->vdd_dep_on_mclk) {
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk, clock,
				&mem_level->MinVoltage, &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->UpHyst = 0;
	mem_level->DownHyst = 100;
	mem_level->VoltageDownHyst = 0;
	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	mem_level->StutterEnable = false;

	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* enable stutter mode if all the follow condition applied
	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
	 * &(data->DisplayTiming.numExistingDisplays));
	 */
	/* NOTE(review): display count is hard-coded to 1 instead of being
	 * queried (see commented PECI call above) — confirm intended. */
	data->display_timing.num_existing_displays = 1;

	/* Stutter only when below the threshold, UVD is idle, and the
	 * display pipe reports stutter capability. */
	if ((data->mclk_stutter_mode_threshold) &&
			(clock <= data->mclk_stutter_mode_threshold) &&
			(!data->is_uvd_enabled) &&
			(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
					STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
	if (!result) {
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}
	return result;
}
2165
2166/**
2167* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
2168*
2169* @param hwmgr the address of the hardware manager
2170*/
2171static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
2172{
2173 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2174 struct fiji_dpm_table *dpm_table = &data->dpm_table;
2175 int result;
2176 /* populate MCLK dpm table to SMU7 */
2177 uint32_t array = data->dpm_table_start +
2178 offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
2179 uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
2180 SMU73_MAX_LEVELS_MEMORY;
2181 struct SMU73_Discrete_MemoryLevel *levels =
2182 data->smc_state_table.MemoryLevel;
2183 uint32_t i;
2184
2185 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2186 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
2187 "can not populate memory level as memory clock is zero",
2188 return -EINVAL);
2189 result = fiji_populate_single_memory_level(hwmgr,
2190 dpm_table->mclk_table.dpm_levels[i].value,
2191 &levels[i]);
2192 if (result)
2193 return result;
2194 }
2195
2196 /* Only enable level 0 for now. */
2197 levels[0].EnabledForActivity = 1;
2198
2199 /* in order to prevent MC activity from stutter mode to push DPM up.
2200 * the UVD change complements this by putting the MCLK in
2201 * a higher state by default such that we are not effected by
2202 * up threshold or and MCLK DPM latency.
2203 */
2204 levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
2205 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
2206
2207 data->smc_state_table.MemoryDpmLevelCount =
2208 (uint8_t)dpm_table->mclk_table.count;
2209 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
2210 fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2211 /* set highest level watermark to high */
2212 levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
2213 PPSMC_DISPLAY_WATERMARK_HIGH;
2214
2215 /* level count will send to smc once at init smc table and never change */
2216 result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
2217 (uint32_t)array_size, data->sram_end);
2218
2219 return result;
2220}
2221
2222/**
2223* Populates the SMC MVDD structure using the provided memory clock.
2224*
2225* @param hwmgr the address of the hardware manager
2226* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
2227* @param voltage the SMC VOLTAGE structure to be populated
2228*/
2229int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
2230 uint32_t mclk, SMIO_Pattern *smio_pat)
2231{
2232 const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2233 struct phm_ppt_v1_information *table_info =
2234 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2235 uint32_t i = 0;
2236
2237 if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
2238 /* find mvdd value which clock is more than request */
2239 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
2240 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
2241 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
2242 break;
2243 }
2244 }
2245 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
2246 "MVDD Voltage is outside the supported range.",
2247 return -EINVAL);
2248 } else
2249 return -EINVAL;
2250
2251 return 0;
2252}
2253
/* Populate the SCLK and MCLK ACPI (lowest-power) levels of the SMC DPM
 * table: frequencies and voltages come from DPM0 when that DPM domain is
 * enabled, otherwise from the VBIOS boot-up values; the SPLL is programmed
 * powered-down/held in reset for the ACPI state.
 */
static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct pp_atomctrl_clock_dividers_vi dividers;
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (!data->sclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0,
		 * already converted to SMC_UL */
		table->ACPILevel.SclkFrequency =
				data->dpm_table.sclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_sclk,
				table->ACPILevel.SclkFrequency,
				&table->ACPILevel.MinVoltage, &mvdd);
		/* Warn-only: a lookup failure keeps going with whatever is
		 * in MinVoltage. */
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDC voltage value "
				"in Clock Dependency Table",);
	} else {
		table->ACPILevel.SclkFrequency =
				data->vbios_boot_state.sclk_bootup_value;
		table->ACPILevel.MinVoltage =
				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
	}

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
			table->ACPILevel.SclkFrequency, &dividers);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset for the ACPI state. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
			SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* Convert everything the SMC reads to SMC byte order. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	if (!data->mclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
		table->MemoryACPILevel.MclkFrequency =
				data->dpm_table.mclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk,
				table->MemoryACPILevel.MclkFrequency,
				&table->MemoryACPILevel.MinVoltage, &mvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDCI voltage value "
				"in Clock Dependency Table",);
	} else {
		table->MemoryACPILevel.MclkFrequency =
				data->vbios_boot_state.mclk_bootup_value;
		table->MemoryACPILevel.MinVoltage =
				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
	}

	/* MVDD: bootup value when MVDD control or MCLK DPM is disabled,
	 * otherwise looked up for the DPM0 memory clock; 0 if that fails. */
	us_mvdd = 0;
	if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!fiji_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
2374
/* Fill the SMC VCE levels from the multimedia clock-voltage dependency
 * table: ECLK frequency, packed min-voltage, and the VBIOS divider.
 * Returns -EINVAL if the table is empty (the loop never runs).
 */
static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
		SMU73_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	table->VceLevelCount = (uint8_t)(mm_table->count);
	table->VceBootLevel = 0;

	for(count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
		/* NOTE(review): MinVoltage is OR-ed, not assigned — this
		 * relies on the table having been zero-initialized upstream;
		 * confirm before reuse. */
		table->VceLevel[count].MinVoltage |=
				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
		table->VceLevel[count].MinVoltage |=
				((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
						VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/*retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->VceLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for VCE engine clock",
				return result);

		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
	}
	return result;
}
2413
2414static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
2415 SMU73_Discrete_DpmTable *table)
2416{
2417 int result = -EINVAL;
2418 uint8_t count;
2419 struct pp_atomctrl_clock_dividers_vi dividers;
2420 struct phm_ppt_v1_information *table_info =
2421 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2422 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2423 table_info->mm_dep_table;
2424 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2425
2426 table->AcpLevelCount = (uint8_t)(mm_table->count);
2427 table->AcpBootLevel = 0;
2428
2429 for (count = 0; count < table->AcpLevelCount; count++) {
2430 table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
2431 table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2432 VOLTAGE_SCALE) << VDDC_SHIFT;
2433 table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
2434 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
2435 table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2436
2437 /* retrieve divider value for VBIOS */
2438 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2439 table->AcpLevel[count].Frequency, &dividers);
2440 PP_ASSERT_WITH_CODE((0 == result),
2441 "can not find divide id for engine clock", return result);
2442
2443 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
2444
2445 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
2446 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
2447 }
2448 return result;
2449}
2450
/* Fill the SMC SAMU levels from the multimedia clock-voltage dependency
 * table, mirroring the VCE/ACP populate helpers above.
 * Returns -EINVAL if the table is empty (the loop never runs).
 */
static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
		SMU73_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	table->SamuBootLevel = 0;
	table->SamuLevelCount = (uint8_t)(mm_table->count);

	for (count = 0; count < table->SamuLevelCount; count++) {
		/* not sure whether we need evclk or not */
		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
				VOLTAGE_SCALE) << VDDC_SHIFT;
		table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
				data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->SamuLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for samu clock", return result);

		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
	}
	return result;
}
2488
/* Program DRAM timings for one (engine clock, memory clock) pair via the
 * VBIOS, then read the resulting MC arbiter registers back and pack them
 * into one SMC arb-table entry (in SMC byte order).
 */
static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
		int32_t eng_clock, int32_t mem_clock,
		struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	uint32_t dram_timing;
	uint32_t dram_timing2;
	uint32_t burstTime;
	ULONG state, trrds, trrdl;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
			eng_clock, mem_clock);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back the registers the VBIOS call just programmed. */
	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);

	/* NOTE(review): `state` is extracted but never used below —
	 * confirm whether the SMC entry should carry it. */
	state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
	trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
	trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);

	arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;
	arb_regs->TRRDS = (uint8_t)trrds;
	arb_regs->TRRDL = (uint8_t)trrdl;

	return 0;
}
2520
2521static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
2522{
2523 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2524 struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
2525 uint32_t i, j;
2526 int result = 0;
2527
2528 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
2529 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
2530 result = fiji_populate_memory_timing_parameters(hwmgr,
2531 data->dpm_table.sclk_table.dpm_levels[i].value,
2532 data->dpm_table.mclk_table.dpm_levels[j].value,
2533 &arb_regs.entries[i][j]);
2534 if (result)
2535 break;
2536 }
2537 }
2538
2539 if (!result)
2540 result = fiji_copy_bytes_to_smc(
2541 hwmgr->smumgr,
2542 data->arb_table_start,
2543 (uint8_t *)&arb_regs,
2544 sizeof(SMU73_Discrete_MCArbDramTimingTable),
2545 data->sram_end);
2546 return result;
2547}
2548
2549static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
2550 struct SMU73_Discrete_DpmTable *table)
2551{
2552 int result = -EINVAL;
2553 uint8_t count;
2554 struct pp_atomctrl_clock_dividers_vi dividers;
2555 struct phm_ppt_v1_information *table_info =
2556 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2557 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2558 table_info->mm_dep_table;
2559 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2560
2561 table->UvdLevelCount = (uint8_t)(mm_table->count);
2562 table->UvdBootLevel = 0;
2563
2564 for (count = 0; count < table->UvdLevelCount; count++) {
2565 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
2566 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
2567 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2568 VOLTAGE_SCALE) << VDDC_SHIFT;
2569 table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
2570 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
2571 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2572
2573 /* retrieve divider value for VBIOS */
2574 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2575 table->UvdLevel[count].VclkFrequency, &dividers);
2576 PP_ASSERT_WITH_CODE((0 == result),
2577 "can not find divide id for Vclk clock", return result);
2578
2579 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
2580
2581 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2582 table->UvdLevel[count].DclkFrequency, &dividers);
2583 PP_ASSERT_WITH_CODE((0 == result),
2584 "can not find divide id for Dclk clock", return result);
2585
2586 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
2587
2588 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
2589 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
2590 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
2591
2592 }
2593 return result;
2594}
2595
2596static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
2597 uint32_t value, uint32_t *boot_level)
2598{
2599 int result = -EINVAL;
2600 uint32_t i;
2601
2602 for (i = 0; i < table->count; i++) {
2603 if (value == table->dpm_levels[i].value) {
2604 *boot_level = i;
2605 result = 0;
2606 }
2607 }
2608 return result;
2609}
2610
/* Fill the boot-level fields of the SMC DPM table from the VBIOS bootup
 * state: sclk/mclk boot level indices and the boot VDDC/VDDCI/MVDD
 * voltages (scaled and converted to SMC byte order).
 *
 * NOTE(review): both fiji_find_boot_level() results are discarded and the
 * function always returns 0, so an unmatched boot clock silently leaves
 * the level at 0 — confirm this best-effort behavior is intended.
 */
static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/* Defaults used when the lookup below does not match. */
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	/* NOTE(review): the casts write through a uint32_t pointer into what
	 * are presumably narrower table fields — TODO confirm the SMU73
	 * layout tolerates the wider store.
	 */
	result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));

	result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(table->MemoryBootLevel));

	/* Boot voltages from the VBIOS, scaled for the SMU. */
	table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
			VOLTAGE_SCALE;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
			VOLTAGE_SCALE;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
			VOLTAGE_SCALE;

	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);

	return 0;
}
2642
2643static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
2644{
2645 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2646 struct phm_ppt_v1_information *table_info =
2647 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2648 uint8_t count, level;
2649
2650 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
2651 for (level = 0; level < count; level++) {
2652 if(table_info->vdd_dep_on_sclk->entries[level].clk >=
2653 data->vbios_boot_state.sclk_bootup_value) {
2654 data->smc_state_table.GraphicsBootLevel = level;
2655 break;
2656 }
2657 }
2658
2659 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
2660 for (level = 0; level < count; level++) {
2661 if(table_info->vdd_dep_on_mclk->entries[level].clk >=
2662 data->vbios_boot_state.mclk_bootup_value) {
2663 data->smc_state_table.MemoryBootLevel = level;
2664 break;
2665 }
2666 }
2667
2668 return 0;
2669}
2670
/* Build the clock-stretcher (CKS) tables in the SMC state table from the
 * efuse-derived ring-oscillator value and the pptable stretch amount,
 * program the PWR_CKS_ENABLE/PWR_CKS_CNTL registers accordingly.
 * Returns 0 on success, -EINVAL for an unsupported stretch amount.
 */
static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
			volt_with_cks, value;
	uint16_t clock_freq_u16;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
			volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (146 * 4));
	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (148 * 4));
	/* RO byte lives in bits 31:24 of fuse word 146; the SS/FF select
	 * flavor is the low nibble of fuse word 148. */
	efuse &= 0xFF000000;
	efuse = efuse >> 24;
	efuse2 &= 0xF;

	/* Map the 8-bit fuse value onto one of two linear RO ranges. */
	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* type selects the row of fiji_clock_stretcher_ddt_table below. */
	if (ro >= 1660)
		type = 0;
	else
		type = 1;

	/* Populate Stretch amount */
	data->smc_state_table.ClockStretcherAmount = stretch_amount;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* Empirical voltage models with/without clock stretching,
		 * evaluated at this sclk (clk is in 10 kHz units). */
		volt_without_cks = (uint32_t)((14041 *
			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
		volt_with_cks = (uint32_t)((13946 *
			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
		/* NOTE(review): when the condition is false, volt_offset
		 * retains the previous iteration's value (0 initially) —
		 * confirm this carry-over is intended. */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* Reset the stretcher logic before enabling static stretching. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			STRETCH_ENABLE, 0x0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			staticEnable, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x0);

	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		/* Any other amount is unsupported: drop the capability. */
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL);
	/* Clear the fields rewritten below, keep the rest of the register. */
	value &= 0xFFC2FF87;
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][0];
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][1];
	/* Highest graphics level's sclk in MHz (table is already SMC-endian). */
	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
			GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
			SclkFrequency) / 100);
	if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
			clock_freq_u16 &&
	    fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
			clock_freq_u16) {
		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
		value |= (fiji_clock_stretch_amount_conversion
				[fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
				[stretch_amount]) << 3;
	}
	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].minFreq);
	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].maxFreq);
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
			fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
			(fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL, value);

	/* Populate DDT Lookup Table */
	for (i = 0; i < 4; i++) {
		/* Assign the minimum and maximum VID stored
		 * in the last row of Clock Stretcher Voltage Table.
		 */
		data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].minVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
		data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].maxVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
		/* Loop through each SCLK and check the frequency
		 * to see if it lies within the frequency for clock stretcher.
		 */
		for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
			cks_setting = 0;
			clock_freq = PP_SMC_TO_HOST_UL(
					data->smc_state_table.GraphicsLevel[j].SclkFrequency);
			/* Check the allowed frequency against the sclk level[j].
			 * Sclk's endianness has already been converted,
			 * and it's in 10Khz unit,
			 * as opposed to Data table, which is in Mhz unit.
			 */
			if (clock_freq >=
					(fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
				cks_setting |= 0x2;
				if (clock_freq <
						(fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
					cks_setting |= 0x1;
			}
			/* NOTE(review): 'setting' is |='d without a visible
			 * clear here — assumes smc_state_table was zeroed
			 * earlier; TODO confirm. */
			data->smc_state_table.ClockStretcherDataTable.
					ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
		}
		CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
				ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].setting);
	}

	/* Clear bit 0 of PWR_CKS_CNTL. */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
2827
2828/**
2829* Populates the SMC VRConfig field in DPM table.
2830*
2831* @param hwmgr the address of the hardware manager
2832* @param table the SMC DPM table structure to be populated
2833* @return always 0
2834*/
2835static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
2836 struct SMU73_Discrete_DpmTable *table)
2837{
2838 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2839 uint16_t config;
2840
2841 config = VR_MERGED_WITH_VDDC;
2842 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
2843
2844 /* Set Vddc Voltage Controller */
2845 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
2846 config = VR_SVI2_PLANE_1;
2847 table->VRConfig |= config;
2848 } else {
2849 PP_ASSERT_WITH_CODE(false,
2850 "VDDC should be on SVI2 control in merged mode!",);
2851 }
2852 /* Set Vddci Voltage Controller */
2853 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
2854 config = VR_SVI2_PLANE_2; /* only in merged mode */
2855 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2856 } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
2857 config = VR_SMIO_PATTERN_1;
2858 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2859 } else {
2860 config = VR_STATIC_VOLTAGE;
2861 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2862 }
2863 /* Set Mvdd Voltage Controller */
2864 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
2865 config = VR_SVI2_PLANE_2;
2866 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2867 } else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
2868 config = VR_SMIO_PATTERN_2;
2869 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2870 } else {
2871 config = VR_STATIC_VOLTAGE;
2872 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2873 }
2874
2875 return 0;
2876}
2877
2878/**
2879* Initializes the SMC table and uploads it
2880*
2881* @param hwmgr the address of the powerplay hardware manager.
2882* @param pInput the pointer to input data (PowerState)
2883* @return always 0
2884*/
2885static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2886{
2887 int result;
2888 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2889 struct phm_ppt_v1_information *table_info =
2890 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2891 struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
2892 const struct fiji_ulv_parm *ulv = &(data->ulv);
2893 uint8_t i;
2894 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
2895
2896 result = fiji_setup_default_dpm_tables(hwmgr);
2897 PP_ASSERT_WITH_CODE(0 == result,
2898 "Failed to setup default DPM tables!", return result);
2899
2900 if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
2901 fiji_populate_smc_voltage_tables(hwmgr, table);
2902
2903 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2904 PHM_PlatformCaps_AutomaticDCTransition))
2905 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2906
2907 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2908 PHM_PlatformCaps_StepVddc))
2909 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2910
2911 if (data->is_memory_gddr5)
2912 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2913
2914 if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
2915 result = fiji_populate_ulv_state(hwmgr, table);
2916 PP_ASSERT_WITH_CODE(0 == result,
2917 "Failed to initialize ULV state!", return result);
2918 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2919 ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
2920 }
2921
2922 result = fiji_populate_smc_link_level(hwmgr, table);
2923 PP_ASSERT_WITH_CODE(0 == result,
2924 "Failed to initialize Link Level!", return result);
2925
2926 result = fiji_populate_all_graphic_levels(hwmgr);
2927 PP_ASSERT_WITH_CODE(0 == result,
2928 "Failed to initialize Graphics Level!", return result);
2929
2930 result = fiji_populate_all_memory_levels(hwmgr);
2931 PP_ASSERT_WITH_CODE(0 == result,
2932 "Failed to initialize Memory Level!", return result);
2933
2934 result = fiji_populate_smc_acpi_level(hwmgr, table);
2935 PP_ASSERT_WITH_CODE(0 == result,
2936 "Failed to initialize ACPI Level!", return result);
2937
2938 result = fiji_populate_smc_vce_level(hwmgr, table);
2939 PP_ASSERT_WITH_CODE(0 == result,
2940 "Failed to initialize VCE Level!", return result);
2941
2942 result = fiji_populate_smc_acp_level(hwmgr, table);
2943 PP_ASSERT_WITH_CODE(0 == result,
2944 "Failed to initialize ACP Level!", return result);
2945
2946 result = fiji_populate_smc_samu_level(hwmgr, table);
2947 PP_ASSERT_WITH_CODE(0 == result,
2948 "Failed to initialize SAMU Level!", return result);
2949
2950 /* Since only the initial state is completely set up at this point
2951 * (the other states are just copies of the boot state) we only
2952 * need to populate the ARB settings for the initial state.
2953 */
2954 result = fiji_program_memory_timing_parameters(hwmgr);
2955 PP_ASSERT_WITH_CODE(0 == result,
2956 "Failed to Write ARB settings for the initial state.", return result);
2957
2958 result = fiji_populate_smc_uvd_level(hwmgr, table);
2959 PP_ASSERT_WITH_CODE(0 == result,
2960 "Failed to initialize UVD Level!", return result);
2961
2962 result = fiji_populate_smc_boot_level(hwmgr, table);
2963 PP_ASSERT_WITH_CODE(0 == result,
2964 "Failed to initialize Boot Level!", return result);
2965
2966 result = fiji_populate_smc_initailial_state(hwmgr);
2967 PP_ASSERT_WITH_CODE(0 == result,
2968 "Failed to initialize Boot State!", return result);
2969
2970 result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
2971 PP_ASSERT_WITH_CODE(0 == result,
2972 "Failed to populate BAPM Parameters!", return result);
2973
2974 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2975 PHM_PlatformCaps_ClockStretcher)) {
2976 result = fiji_populate_clock_stretcher_data_table(hwmgr);
2977 PP_ASSERT_WITH_CODE(0 == result,
2978 "Failed to populate Clock Stretcher Data Table!",
2979 return result);
2980 }
2981
2982 table->GraphicsVoltageChangeEnable = 1;
2983 table->GraphicsThermThrottleEnable = 1;
2984 table->GraphicsInterval = 1;
2985 table->VoltageInterval = 1;
2986 table->ThermalInterval = 1;
2987 table->TemperatureLimitHigh =
2988 table_info->cac_dtp_table->usTargetOperatingTemp *
2989 FIJI_Q88_FORMAT_CONVERSION_UNIT;
2990 table->TemperatureLimitLow =
2991 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2992 FIJI_Q88_FORMAT_CONVERSION_UNIT;
2993 table->MemoryVoltageChangeEnable = 1;
2994 table->MemoryInterval = 1;
2995 table->VoltageResponseTime = 0;
2996 table->PhaseResponseTime = 0;
2997 table->MemoryThermThrottleEnable = 1;
2998 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
2999 table->PCIeGenInterval = 1;
3000
3001 result = fiji_populate_vr_config(hwmgr, table);
3002 PP_ASSERT_WITH_CODE(0 == result,
3003 "Failed to populate VRConfig setting!", return result);
3004
3005 table->ThermGpio = 17;
3006 table->SclkStepSize = 0x4000;
3007
3008 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
3009 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
3010 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3011 PHM_PlatformCaps_RegulatorHot);
3012 } else {
3013 table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
3014 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3015 PHM_PlatformCaps_RegulatorHot);
3016 }
3017
3018 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
3019 &gpio_pin)) {
3020 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
3021 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3022 PHM_PlatformCaps_AutomaticDCTransition);
3023 } else {
3024 table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
3025 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3026 PHM_PlatformCaps_AutomaticDCTransition);
3027 }
3028
3029 /* Thermal Output GPIO */
3030 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
3031 &gpio_pin)) {
3032 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3033 PHM_PlatformCaps_ThermalOutGPIO);
3034
3035 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
3036
3037 /* For porlarity read GPIOPAD_A with assigned Gpio pin
3038 * since VBIOS will program this register to set 'inactive state',
3039 * driver can then determine 'active state' from this and
3040 * program SMU with correct polarity
3041 */
3042 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
3043 (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
3044 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
3045
3046 /* if required, combine VRHot/PCC with thermal out GPIO */
3047 if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3048 PHM_PlatformCaps_RegulatorHot) &&
3049 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3050 PHM_PlatformCaps_CombinePCCWithThermalSignal))
3051 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
3052 } else {
3053 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3054 PHM_PlatformCaps_ThermalOutGPIO);
3055 table->ThermOutGpio = 17;
3056 table->ThermOutPolarity = 1;
3057 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
3058 }
3059
3060 for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
3061 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
3062
3063 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
3064 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
3065 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
3066 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
3067 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
3068 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
3069 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
3070 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
3071 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
3072
3073 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
3074 result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
3075 data->dpm_table_start +
3076 offsetof(SMU73_Discrete_DpmTable, SystemFlags),
3077 (uint8_t *)&(table->SystemFlags),
3078 sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
3079 data->sram_end);
3080 PP_ASSERT_WITH_CODE(0 == result,
3081 "Failed to upload dpm data to SMC memory!", return result);
3082
3083 return 0;
3084}
3085
3086/**
3087* Initialize the ARB DRAM timing table's index field.
3088*
3089* @param hwmgr the address of the powerplay hardware manager.
3090* @return always 0
3091*/
3092static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
3093{
3094 const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3095 uint32_t tmp;
3096 int result;
3097
3098 /* This is a read-modify-write on the first byte of the ARB table.
3099 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
3100 * is the field 'current'.
3101 * This solution is ugly, but we never write the whole table only
3102 * individual fields in it.
3103 * In reality this field should not be in that structure
3104 * but in a soft register.
3105 */
3106 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
3107 data->arb_table_start, &tmp, data->sram_end);
3108
3109 if (result)
3110 return result;
3111
3112 tmp &= 0x00FFFFFF;
3113 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
3114
3115 return fiji_write_smc_sram_dword(hwmgr->smumgr,
3116 data->arb_table_start, tmp, data->sram_end);
3117}
3118
3119static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
3120{
3121 if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3122 PHM_PlatformCaps_RegulatorHot))
3123 return smum_send_msg_to_smc(hwmgr->smumgr,
3124 PPSMC_MSG_EnableVRHotGPIOInterrupt);
3125
3126 return 0;
3127}
3128
/* Enable SCLK power management by clearing SCLK_PWRMGT_OFF in
 * SCLK_PWRMGT_CNTL. Always returns 0.
 */
static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
3135
3136static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
3137{
3138 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3139 struct fiji_ulv_parm *ulv = &(data->ulv);
3140
3141 if (ulv->ulv_supported)
3142 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
3143
3144 return 0;
3145}
3146
3147static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3148{
3149 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3150 PHM_PlatformCaps_SclkDeepSleep)) {
3151 if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
3152 PP_ASSERT_WITH_CODE(false,
3153 "Attempt to enable Master Deep Sleep switch failed!",
3154 return -1);
3155 } else {
3156 if (smum_send_msg_to_smc(hwmgr->smumgr,
3157 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
3158 PP_ASSERT_WITH_CODE(false,
3159 "Attempt to disable Master Deep Sleep switch failed!",
3160 return -1);
3161 }
3162 }
3163
3164 return 0;
3165}
3166
/* Enable SCLK and MCLK DPM in the SMC. For MCLK this also sizes the LCAC
 * MC/CPL thresholds from the number of active MCD tiles, programs them in
 * two passes (thresholds, then signal IDs), and enables CAC per MCD tile.
 * Returns 0 on success, -1/-EINVAL on failure.
 */
static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t val, val0, val2;
	uint32_t i, cpl_cntl, cpl_threshold, mc_threshold;

	/* enable SCLK dpm */
	if(!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -1);

	/* enable MCLK dpm */
	if(0 == data->mclk_dpm_key_disabled) {
		cpl_threshold = 0;
		mc_threshold = 0;

		/* Read per MCD tile (0 - 7) */
		for (i = 0; i < 8; i++) {
			PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
			val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
			if (0xf0000000 != val) {
				/* count number of MCQ that has channel(s) enabled */
				cpl_threshold++;
				/* only harvest 3 or full 4 supported */
				mc_threshold = val ? 3 : 4;
			}
		}
		PP_ASSERT_WITH_CODE(0 != cpl_threshold,
				"Number of MCQ is zero!", return -EINVAL;);

		/* Pass 1: program thresholds + enable bits. */
		mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
				LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
				LCAC_MC0_CNTL__MC0_ENABLE_MASK;
		cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
				LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
				LCAC_CPL_CNTL__CPL_ENABLE_MASK;
		cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC0_CNTL, mc_threshold);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC1_CNTL, mc_threshold);
		/* NOTE(review): MC2..MC7 are only programmed when
		 * cpl_threshold == 8, i.e. all eight tiles active —
		 * confirm partially-harvested parts only need MC0/MC1. */
		if (8 == cpl_threshold) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC2_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC3_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC4_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC5_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC6_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC7_CNTL, mc_threshold);
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_CPL_CNTL, cpl_cntl);

		udelay(5);

		/* Pass 2: rewrite the same registers with the signal ID set. */
		mc_threshold = mc_threshold |
				(1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
		cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC0_CNTL, mc_threshold);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC1_CNTL, mc_threshold);
		if (8 == cpl_threshold) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC2_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC3_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC4_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC5_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC6_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC7_CNTL, mc_threshold);
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_CPL_CNTL, cpl_cntl);

		/* Program CAC_EN per MCD (0-7) Tile */
		val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
		val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MC_RD_ENABLE_MASK);

		for (i = 0; i < 8; i++) {
			/* Enable MCD i Tile read & write */
			val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
					(1 << i));
			cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
			/* Enable CAC_ON MCD i Tile */
			val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
			val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
			cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
		}
		/* Set MC_CONFIG_MCD back to its default setting val0 */
		cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -1);
	}
	return 0;
}
3286
/* Start DPM: enable global power management and sclk deep sleep, prepare
 * the PCIE link for DPM, enable voltage DPM, then sclk/mclk DPM, and
 * finally PCIE DPM if not disabled. Returns 0 on success, -1 on failure.
 */
static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/*enable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);
	/* enable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);
	/* prepare for PCIE DPM */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + offsetof(SMU73_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -1);

	if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
		return -1;
	}

	/* enable PCIE dpm */
	if(!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -1);
	}

	return 0;
}
3326
3327static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
3328 uint32_t sources)
3329{
3330 bool protection;
3331 enum DPM_EVENT_SRC src;
3332
3333 switch (sources) {
3334 default:
3335 printk(KERN_ERR "Unknown throttling event sources.");
3336 /* fall through */
3337 case 0:
3338 protection = false;
3339 /* src is unused */
3340 break;
3341 case (1 << PHM_AutoThrottleSource_Thermal):
3342 protection = true;
3343 src = DPM_EVENT_SRC_DIGITAL;
3344 break;
3345 case (1 << PHM_AutoThrottleSource_External):
3346 protection = true;
3347 src = DPM_EVENT_SRC_EXTERNAL;
3348 break;
3349 case (1 << PHM_AutoThrottleSource_External) |
3350 (1 << PHM_AutoThrottleSource_Thermal):
3351 protection = true;
3352 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
3353 break;
3354 }
3355 /* Order matters - don't enable thermal protection for the wrong source. */
3356 if (protection) {
3357 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
3358 DPM_EVENT_SRC, src);
3359 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3360 THERMAL_PROTECTION_DIS,
3361 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3362 PHM_PlatformCaps_ThermalController));
3363 } else
3364 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3365 THERMAL_PROTECTION_DIS, 1);
3366}
3367
3368static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3369 PHM_AutoThrottleSource source)
3370{
3371 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3372
3373 if (!(data->active_auto_throttle_sources & (1 << source))) {
3374 data->active_auto_throttle_sources |= 1 << source;
3375 fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3376 }
3377 return 0;
3378}
3379
/* Convenience wrapper: enable the thermal auto-throttle source. */
static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
3384
3385static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
3386{
3387 int tmp_result, result = 0;
3388
3389 tmp_result = (!fiji_is_dpm_running(hwmgr))? 0 : -1;
3390 PP_ASSERT_WITH_CODE(result == 0,
3391 "DPM is already running right now, no need to enable DPM!",
3392 return 0);
3393
3394 if (fiji_voltage_control(hwmgr)) {
3395 tmp_result = fiji_enable_voltage_control(hwmgr);
3396 PP_ASSERT_WITH_CODE(tmp_result == 0,
3397 "Failed to enable voltage control!",
3398 result = tmp_result);
3399 }
3400
3401 if (fiji_voltage_control(hwmgr)) {
3402 tmp_result = fiji_construct_voltage_tables(hwmgr);
3403 PP_ASSERT_WITH_CODE((0 == tmp_result),
3404 "Failed to contruct voltage tables!",
3405 result = tmp_result);
3406 }
3407
3408 tmp_result = fiji_initialize_mc_reg_table(hwmgr);
3409 PP_ASSERT_WITH_CODE((0 == tmp_result),
3410 "Failed to initialize MC reg table!", result = tmp_result);
3411
3412 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3413 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
3414 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3415 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
3416
3417 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3418 PHM_PlatformCaps_ThermalController))
3419 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3420 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
3421
3422 tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
3423 PP_ASSERT_WITH_CODE((0 == tmp_result),
3424 "Failed to program static screen threshold parameters!",
3425 result = tmp_result);
3426
3427 tmp_result = fiji_enable_display_gap(hwmgr);
3428 PP_ASSERT_WITH_CODE((0 == tmp_result),
3429 "Failed to enable display gap!", result = tmp_result);
3430
3431 tmp_result = fiji_program_voting_clients(hwmgr);
3432 PP_ASSERT_WITH_CODE((0 == tmp_result),
3433 "Failed to program voting clients!", result = tmp_result);
3434
3435 tmp_result = fiji_process_firmware_header(hwmgr);
3436 PP_ASSERT_WITH_CODE((0 == tmp_result),
3437 "Failed to process firmware header!", result = tmp_result);
3438
3439 tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
3440 PP_ASSERT_WITH_CODE((0 == tmp_result),
3441 "Failed to initialize switch from ArbF0 to F1!",
3442 result = tmp_result);
3443
3444 tmp_result = fiji_init_smc_table(hwmgr);
3445 PP_ASSERT_WITH_CODE((0 == tmp_result),
3446 "Failed to initialize SMC table!", result = tmp_result);
3447
3448 tmp_result = fiji_init_arb_table_index(hwmgr);
3449 PP_ASSERT_WITH_CODE((0 == tmp_result),
3450 "Failed to initialize ARB table index!", result = tmp_result);
3451
3452 tmp_result = fiji_populate_pm_fuses(hwmgr);
3453 PP_ASSERT_WITH_CODE((0 == tmp_result),
3454 "Failed to populate PM fuses!", result = tmp_result);
3455
3456 tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
3457 PP_ASSERT_WITH_CODE((0 == tmp_result),
3458 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
3459
3460 tmp_result = tonga_notify_smc_display_change(hwmgr, false);
3461 PP_ASSERT_WITH_CODE((0 == tmp_result),
3462 "Failed to notify no display!", result = tmp_result);
3463
3464 tmp_result = fiji_enable_sclk_control(hwmgr);
3465 PP_ASSERT_WITH_CODE((0 == tmp_result),
3466 "Failed to enable SCLK control!", result = tmp_result);
3467
3468 tmp_result = fiji_enable_ulv(hwmgr);
3469 PP_ASSERT_WITH_CODE((0 == tmp_result),
3470 "Failed to enable ULV!", result = tmp_result);
3471
3472 tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
3473 PP_ASSERT_WITH_CODE((0 == tmp_result),
3474 "Failed to enable deep sleep master switch!", result = tmp_result);
3475
3476 tmp_result = fiji_start_dpm(hwmgr);
3477 PP_ASSERT_WITH_CODE((0 == tmp_result),
3478 "Failed to start DPM!", result = tmp_result);
3479
3480 tmp_result = fiji_enable_smc_cac(hwmgr);
3481 PP_ASSERT_WITH_CODE((0 == tmp_result),
3482 "Failed to enable SMC CAC!", result = tmp_result);
3483
3484 tmp_result = fiji_enable_power_containment(hwmgr);
3485 PP_ASSERT_WITH_CODE((0 == tmp_result),
3486 "Failed to enable power containment!", result = tmp_result);
3487
3488 tmp_result = fiji_power_control_set_level(hwmgr);
3489 PP_ASSERT_WITH_CODE((0 == tmp_result),
3490 "Failed to power control set level!", result = tmp_result);
3491
3492 tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
3493 PP_ASSERT_WITH_CODE((0 == tmp_result),
3494 "Failed to enable thermal auto throttle!", result = tmp_result);
3495
3496 return result;
3497}
3498
3499static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
3500{
3501 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3502 uint32_t level, tmp;
3503
3504 if (!data->sclk_dpm_key_disabled) {
3505 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3506 level = 0;
3507 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3508 while (tmp >>= 1)
3509 level++;
3510 if (level)
3511 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3512 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3513 (1 << level));
3514 }
3515 }
3516
3517 if (!data->mclk_dpm_key_disabled) {
3518 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3519 level = 0;
3520 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3521 while (tmp >>= 1)
3522 level++;
3523 if (level)
3524 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3525 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3526 (1 << level));
3527 }
3528 }
3529
3530 if (!data->pcie_dpm_key_disabled) {
3531 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3532 level = 0;
3533 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3534 while (tmp >>= 1)
3535 level++;
3536 if (level)
3537 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3538 PPSMC_MSG_PCIeDPM_ForceLevel,
3539 (1 << level));
3540 }
3541 }
3542 return 0;
3543}
3544
3545static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
3546{
3547 struct phm_ppt_v1_information *table_info =
3548 (struct phm_ppt_v1_information *)hwmgr->pptable;
3549 struct phm_clock_voltage_dependency_table *table =
3550 table_info->vddc_dep_on_dal_pwrl;
3551 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
3552 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
3553 uint32_t req_vddc = 0, req_volt, i;
3554
3555 if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
3556 dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
3557 return;
3558
3559 for (i= 0; i < table->count; i++) {
3560 if (dal_power_level == table->entries[i].clk) {
3561 req_vddc = table->entries[i].v;
3562 break;
3563 }
3564 }
3565
3566 vddc_table = table_info->vdd_dep_on_sclk;
3567 for (i= 0; i < vddc_table->count; i++) {
3568 if (req_vddc <= vddc_table->entries[i].vddc) {
3569 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
3570 << VDDC_SHIFT;
3571 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3572 PPSMC_MSG_VddC_Request, req_volt);
3573 return;
3574 }
3575 }
3576 printk(KERN_ERR "DAL requested level can not"
3577 " found a available voltage in VDDC DPM Table \n");
3578}
3579
3580static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
3581{
3582 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3583
3584 fiji_apply_dal_min_voltage_request(hwmgr);
3585
3586 if (!data->sclk_dpm_key_disabled) {
3587 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3588 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3589 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3590 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3591 }
3592 return 0;
3593}
3594
3595static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3596{
3597 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3598
3599 if (!fiji_is_dpm_running(hwmgr))
3600 return -EINVAL;
3601
3602 if (!data->pcie_dpm_key_disabled) {
3603 smum_send_msg_to_smc(hwmgr->smumgr,
3604 PPSMC_MSG_PCIeDPM_UnForceLevel);
3605 }
3606
3607 return fiji_upload_dpmlevel_enable_mask(hwmgr);
3608}
3609
/*
 * Return the index of the lowest set bit in @mask, i.e. the lowest
 * enabled DPM level. @hwmgr is unused; it is kept for signature
 * symmetry with the other level helpers.
 *
 * Fix: guard against an empty mask — the unguarded loop would shift
 * past the width of the type (undefined behavior) and never find a
 * set bit. An empty mask now yields level 0.
 */
static uint32_t fiji_get_lowest_enabled_level(
		struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	if (!mask)
		return 0;

	while(0 == (mask & (1 << level)))
		level++;

	return level;
}
3620
3621static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3622{
3623 struct fiji_hwmgr *data =
3624 (struct fiji_hwmgr *)(hwmgr->backend);
3625 uint32_t level;
3626
3627 if (!data->sclk_dpm_key_disabled)
3628 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3629 level = fiji_get_lowest_enabled_level(hwmgr,
3630 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3631 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3632 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3633 (1 << level));
3634
3635 }
3636
3637 if (!data->mclk_dpm_key_disabled) {
3638 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3639 level = fiji_get_lowest_enabled_level(hwmgr,
3640 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3641 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3642 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3643 (1 << level));
3644 }
3645 }
3646
3647 if (!data->pcie_dpm_key_disabled) {
3648 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3649 level = fiji_get_lowest_enabled_level(hwmgr,
3650 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3651 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3652 PPSMC_MSG_PCIeDPM_ForceLevel,
3653 (1 << level));
3654 }
3655 }
3656
3657 return 0;
3658
3659}
3660static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
3661 enum amd_dpm_forced_level level)
3662{
3663 int ret = 0;
3664
3665 switch (level) {
3666 case AMD_DPM_FORCED_LEVEL_HIGH:
3667 ret = fiji_force_dpm_highest(hwmgr);
3668 if (ret)
3669 return ret;
3670 break;
3671 case AMD_DPM_FORCED_LEVEL_LOW:
3672 ret = fiji_force_dpm_lowest(hwmgr);
3673 if (ret)
3674 return ret;
3675 break;
3676 case AMD_DPM_FORCED_LEVEL_AUTO:
3677 ret = fiji_unforce_dpm_levels(hwmgr);
3678 if (ret)
3679 return ret;
3680 break;
3681 default:
3682 break;
3683 }
3684
3685 hwmgr->dpm_level = level;
3686
3687 return ret;
3688}
3689
/* Report the size of the Fiji-specific hardware power-state struct so
 * the core can size pp_power_state allocations; @hwmgr is unused. */
static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct fiji_power_state);
}
3694
3695static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3696 void *state, struct pp_power_state *power_state,
3697 void *pp_table, uint32_t classification_flag)
3698{
3699 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3700 struct fiji_power_state *fiji_power_state =
3701 (struct fiji_power_state *)(&(power_state->hardware));
3702 struct fiji_performance_level *performance_level;
3703 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3704 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3705 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3706 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
3707 (ATOM_Tonga_SCLK_Dependency_Table *)
3708 (((unsigned long)powerplay_table) +
3709 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3710 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3711 (ATOM_Tonga_MCLK_Dependency_Table *)
3712 (((unsigned long)powerplay_table) +
3713 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3714
3715 /* The following fields are not initialized here: id orderedList allStatesList */
3716 power_state->classification.ui_label =
3717 (le16_to_cpu(state_entry->usClassification) &
3718 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3719 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3720 power_state->classification.flags = classification_flag;
3721 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3722
3723 power_state->classification.temporary_state = false;
3724 power_state->classification.to_be_deleted = false;
3725
3726 power_state->validation.disallowOnDC =
3727 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3728 ATOM_Tonga_DISALLOW_ON_DC));
3729
3730 power_state->pcie.lanes = 0;
3731
3732 power_state->display.disableFrameModulation = false;
3733 power_state->display.limitRefreshrate = false;
3734 power_state->display.enableVariBright =
3735 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3736 ATOM_Tonga_ENABLE_VARIBRIGHT));
3737
3738 power_state->validation.supportedPowerLevels = 0;
3739 power_state->uvd_clocks.VCLK = 0;
3740 power_state->uvd_clocks.DCLK = 0;
3741 power_state->temperatures.min = 0;
3742 power_state->temperatures.max = 0;
3743
3744 performance_level = &(fiji_power_state->performance_levels
3745 [fiji_power_state->performance_level_count++]);
3746
3747 PP_ASSERT_WITH_CODE(
3748 (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
3749 "Performance levels exceeds SMC limit!",
3750 return -1);
3751
3752 PP_ASSERT_WITH_CODE(
3753 (fiji_power_state->performance_level_count <=
3754 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3755 "Performance levels exceeds Driver limit!",
3756 return -1);
3757
3758 /* Performance levels are arranged from low to high. */
3759 performance_level->memory_clock = mclk_dep_table->entries
3760 [state_entry->ucMemoryClockIndexLow].ulMclk;
3761 performance_level->engine_clock = sclk_dep_table->entries
3762 [state_entry->ucEngineClockIndexLow].ulSclk;
3763 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3764 state_entry->ucPCIEGenLow);
3765 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3766 state_entry->ucPCIELaneHigh);
3767
3768 performance_level = &(fiji_power_state->performance_levels
3769 [fiji_power_state->performance_level_count++]);
3770 performance_level->memory_clock = mclk_dep_table->entries
3771 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3772 performance_level->engine_clock = sclk_dep_table->entries
3773 [state_entry->ucEngineClockIndexHigh].ulSclk;
3774 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3775 state_entry->ucPCIEGenHigh);
3776 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3777 state_entry->ucPCIELaneHigh);
3778
3779 return 0;
3780}
3781
/*
 * Fill @state from powerplay-table entry @entry_index, then post-process
 * the resulting Fiji hardware state: sanity-check single-entry MCLK/VDDCI
 * tables against the VBIOS boot values, propagate DC/ACPI/UVD settings,
 * and widen the tracked PCIe gen/lane ranges for performance or battery
 * states.
 */
static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct fiji_power_state *ps;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct fiji_power_state *)(&state->hardware);

	/* Parses the table entry and populates ps via the callback. */
	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
			fiji_get_pp_table_entry_callback_func);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the PCIe gen of the ACPI state for later PSPP requests. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	/* On success, grow the tracked min/max PCIe gen and lane ranges
	 * for the state class this entry belongs to. */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
3885
3886static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3887 struct pp_power_state *request_ps,
3888 const struct pp_power_state *current_ps)
3889{
3890 struct fiji_power_state *fiji_ps =
3891 cast_phw_fiji_power_state(&request_ps->hardware);
3892 uint32_t sclk;
3893 uint32_t mclk;
3894 struct PP_Clocks minimum_clocks = {0};
3895 bool disable_mclk_switching;
3896 bool disable_mclk_switching_for_frame_lock;
3897 struct cgs_display_info info = {0};
3898 const struct phm_clock_and_voltage_limits *max_limits;
3899 uint32_t i;
3900 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3901 struct phm_ppt_v1_information *table_info =
3902 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3903 int32_t count;
3904 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3905
3906 data->battery_state = (PP_StateUILabel_Battery ==
3907 request_ps->classification.ui_label);
3908
3909 PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
3910 "VI should always have 2 performance levels",);
3911
3912 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3913 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3914 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3915
3916 /* Cap clock DPM tables at DC MAX if it is in DC. */
3917 if (PP_PowerSource_DC == hwmgr->power_source) {
3918 for (i = 0; i < fiji_ps->performance_level_count; i++) {
3919 if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
3920 fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
3921 if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
3922 fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
3923 }
3924 }
3925
3926 fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3927 fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3928
3929 fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
3930
3931 cgs_get_active_displays_info(hwmgr->device, &info);
3932
3933 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3934
3935 /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
3936
3937 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3938 PHM_PlatformCaps_StablePState)) {
3939 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3940 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3941
3942 for (count = table_info->vdd_dep_on_sclk->count - 1;
3943 count >= 0; count--) {
3944 if (stable_pstate_sclk >=
3945 table_info->vdd_dep_on_sclk->entries[count].clk) {
3946 stable_pstate_sclk =
3947 table_info->vdd_dep_on_sclk->entries[count].clk;
3948 break;
3949 }
3950 }
3951
3952 if (count < 0)
3953 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3954
3955 stable_pstate_mclk = max_limits->mclk;
3956
3957 minimum_clocks.engineClock = stable_pstate_sclk;
3958 minimum_clocks.memoryClock = stable_pstate_mclk;
3959 }
3960
3961 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3962 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3963
3964 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3965 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3966
3967 fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3968
3969 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
3970 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3971 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3972 "Overdrive sclk exceeds limit",
3973 hwmgr->gfx_arbiter.sclk_over_drive =
3974 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3975
3976 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3977 fiji_ps->performance_levels[1].engine_clock =
3978 hwmgr->gfx_arbiter.sclk_over_drive;
3979 }
3980
3981 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
3982 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3983 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3984 "Overdrive mclk exceeds limit",
3985 hwmgr->gfx_arbiter.mclk_over_drive =
3986 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3987
3988 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3989 fiji_ps->performance_levels[1].memory_clock =
3990 hwmgr->gfx_arbiter.mclk_over_drive;
3991 }
3992
3993 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3994 hwmgr->platform_descriptor.platformCaps,
3995 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3996
3997 disable_mclk_switching = (1 < info.display_count) ||
3998 disable_mclk_switching_for_frame_lock;
3999
4000 sclk = fiji_ps->performance_levels[0].engine_clock;
4001 mclk = fiji_ps->performance_levels[0].memory_clock;
4002
4003 if (disable_mclk_switching)
4004 mclk = fiji_ps->performance_levels
4005 [fiji_ps->performance_level_count - 1].memory_clock;
4006
4007 if (sclk < minimum_clocks.engineClock)
4008 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
4009 max_limits->sclk : minimum_clocks.engineClock;
4010
4011 if (mclk < minimum_clocks.memoryClock)
4012 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
4013 max_limits->mclk : minimum_clocks.memoryClock;
4014
4015 fiji_ps->performance_levels[0].engine_clock = sclk;
4016 fiji_ps->performance_levels[0].memory_clock = mclk;
4017
4018 fiji_ps->performance_levels[1].engine_clock =
4019 (fiji_ps->performance_levels[1].engine_clock >=
4020 fiji_ps->performance_levels[0].engine_clock) ?
4021 fiji_ps->performance_levels[1].engine_clock :
4022 fiji_ps->performance_levels[0].engine_clock;
4023
4024 if (disable_mclk_switching) {
4025 if (mclk < fiji_ps->performance_levels[1].memory_clock)
4026 mclk = fiji_ps->performance_levels[1].memory_clock;
4027
4028 fiji_ps->performance_levels[0].memory_clock = mclk;
4029 fiji_ps->performance_levels[1].memory_clock = mclk;
4030 } else {
4031 if (fiji_ps->performance_levels[1].memory_clock <
4032 fiji_ps->performance_levels[0].memory_clock)
4033 fiji_ps->performance_levels[1].memory_clock =
4034 fiji_ps->performance_levels[0].memory_clock;
4035 }
4036
4037 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4038 PHM_PlatformCaps_StablePState)) {
4039 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4040 fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
4041 fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
4042 fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
4043 fiji_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
4044 }
4045 }
4046
4047 return 0;
4048}
4049
4050static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
4051{
4052 const struct phm_set_power_state_input *states =
4053 (const struct phm_set_power_state_input *)input;
4054 const struct fiji_power_state *fiji_ps =
4055 cast_const_phw_fiji_power_state(states->pnew_state);
4056 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4057 struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4058 uint32_t sclk = fiji_ps->performance_levels
4059 [fiji_ps->performance_level_count - 1].engine_clock;
4060 struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4061 uint32_t mclk = fiji_ps->performance_levels
4062 [fiji_ps->performance_level_count - 1].memory_clock;
4063 struct PP_Clocks min_clocks = {0};
4064 uint32_t i;
4065 struct cgs_display_info info = {0};
4066
4067 data->need_update_smu7_dpm_table = 0;
4068
4069 for (i = 0; i < sclk_table->count; i++) {
4070 if (sclk == sclk_table->dpm_levels[i].value)
4071 break;
4072 }
4073
4074 if (i >= sclk_table->count)
4075 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4076 else {
4077 /* TODO: Check SCLK in DAL's minimum clocks
4078 * in case DeepSleep divider update is required.
4079 */
4080 if(data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR)
4081 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4082 }
4083
4084 for (i = 0; i < mclk_table->count; i++) {
4085 if (mclk == mclk_table->dpm_levels[i].value)
4086 break;
4087 }
4088
4089 if (i >= mclk_table->count)
4090 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4091
4092 cgs_get_active_displays_info(hwmgr->device, &info);
4093
4094 if (data->display_timing.num_existing_displays != info.display_count)
4095 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4096
4097 return 0;
4098}
4099
4100static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4101 const struct fiji_power_state *fiji_ps)
4102{
4103 uint32_t i;
4104 uint32_t sclk, max_sclk = 0;
4105 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4106 struct fiji_dpm_table *dpm_table = &data->dpm_table;
4107
4108 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4109 sclk = fiji_ps->performance_levels[i].engine_clock;
4110 if (max_sclk < sclk)
4111 max_sclk = sclk;
4112 }
4113
4114 for (i = 0; i < dpm_table->sclk_table.count; i++) {
4115 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4116 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4117 dpm_table->pcie_speed_table.dpm_levels
4118 [dpm_table->pcie_speed_table.count - 1].value :
4119 dpm_table->pcie_speed_table.dpm_levels[i].value);
4120 }
4121
4122 return 0;
4123}
4124
/*
 * Ask the platform (ACPI PSPP) for a higher PCIe link speed before
 * switching to a state whose maximum SCLK wants one. On ACPI refusal
 * the request degrades Gen3 -> Gen2 via deliberate switch fall-through
 * and records the forced gen for the next attempt. A downward change
 * only sets pspp_notify_required; the notification happens after the
 * state change.
 */
static int fiji_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	const struct fiji_power_state *fiji_nps =
			cast_const_phw_fiji_power_state(states->pnew_state);
	const struct fiji_power_state *fiji_cps =
			cast_const_phw_fiji_power_state(states->pcurrent_state);

	uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
	uint16_t current_link_speed;

	/* A previously forced gen overrides the current state's speed. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch(target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 refused: remember Gen2 and retry it below
			 * unless we are already at Gen2. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - both requests refused, keep current speed */
		default:
			data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
4168
4169static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4170{
4171 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4172
4173 if (0 == data->need_update_smu7_dpm_table)
4174 return 0;
4175
4176 if ((0 == data->sclk_dpm_key_disabled) &&
4177 (data->need_update_smu7_dpm_table &
4178 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4179 PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
4180 "Trying to freeze SCLK DPM when DPM is disabled",);
4181 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4182 PPSMC_MSG_SCLKDPM_FreezeLevel),
4183 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4184 return -1);
4185 }
4186
4187 if ((0 == data->mclk_dpm_key_disabled) &&
4188 (data->need_update_smu7_dpm_table &
4189 DPMTABLE_OD_UPDATE_MCLK)) {
4190 PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
4191 "Trying to freeze MCLK DPM when DPM is disabled",);
4192 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4193 PPSMC_MSG_MCLKDPM_FreezeLevel),
4194 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4195 return -1);
4196 }
4197
4198 return 0;
4199}
4200
4201static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
4202 struct pp_hwmgr *hwmgr, const void *input)
4203{
4204 int result = 0;
4205 const struct phm_set_power_state_input *states =
4206 (const struct phm_set_power_state_input *)input;
4207 const struct fiji_power_state *fiji_ps =
4208 cast_const_phw_fiji_power_state(states->pnew_state);
4209 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4210 uint32_t sclk = fiji_ps->performance_levels
4211 [fiji_ps->performance_level_count - 1].engine_clock;
4212 uint32_t mclk = fiji_ps->performance_levels
4213 [fiji_ps->performance_level_count - 1].memory_clock;
4214 struct fiji_dpm_table *dpm_table = &data->dpm_table;
4215
4216 struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
4217 uint32_t dpm_count, clock_percent;
4218 uint32_t i;
4219
4220 if (0 == data->need_update_smu7_dpm_table)
4221 return 0;
4222
4223 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4224 dpm_table->sclk_table.dpm_levels
4225 [dpm_table->sclk_table.count - 1].value = sclk;
4226
4227 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4228 PHM_PlatformCaps_OD6PlusinACSupport) ||
4229 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4230 PHM_PlatformCaps_OD6PlusinDCSupport)) {
4231 /* Need to do calculation based on the golden DPM table
4232 * as the Heatmap GPU Clock axis is also based on the default values
4233 */
4234 PP_ASSERT_WITH_CODE(
4235 (golden_dpm_table->sclk_table.dpm_levels
4236 [golden_dpm_table->sclk_table.count - 1].value != 0),
4237 "Divide by 0!",
4238 return -1);
4239 dpm_count = dpm_table->sclk_table.count < 2 ?
4240 0 : dpm_table->sclk_table.count - 2;
4241 for (i = dpm_count; i > 1; i--) {
4242 if (sclk > golden_dpm_table->sclk_table.dpm_levels
4243 [golden_dpm_table->sclk_table.count-1].value) {
4244 clock_percent =
4245 ((sclk - golden_dpm_table->sclk_table.dpm_levels
4246 [golden_dpm_table->sclk_table.count-1].value) * 100) /
4247 golden_dpm_table->sclk_table.dpm_levels
4248 [golden_dpm_table->sclk_table.count-1].value;
4249
4250 dpm_table->sclk_table.dpm_levels[i].value =
4251 golden_dpm_table->sclk_table.dpm_levels[i].value +
4252 (golden_dpm_table->sclk_table.dpm_levels[i].value *
4253 clock_percent)/100;
4254
4255 } else if (golden_dpm_table->sclk_table.dpm_levels
4256 [dpm_table->sclk_table.count-1].value > sclk) {
4257 clock_percent =
4258 ((golden_dpm_table->sclk_table.dpm_levels
4259 [golden_dpm_table->sclk_table.count - 1].value - sclk) *
4260 100) /
4261 golden_dpm_table->sclk_table.dpm_levels
4262 [golden_dpm_table->sclk_table.count-1].value;
4263
4264 dpm_table->sclk_table.dpm_levels[i].value =
4265 golden_dpm_table->sclk_table.dpm_levels[i].value -
4266 (golden_dpm_table->sclk_table.dpm_levels[i].value *
4267 clock_percent) / 100;
4268 } else
4269 dpm_table->sclk_table.dpm_levels[i].value =
4270 golden_dpm_table->sclk_table.dpm_levels[i].value;
4271 }
4272 }
4273 }
4274
4275 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4276 dpm_table->mclk_table.dpm_levels
4277 [dpm_table->mclk_table.count - 1].value = mclk;
4278
4279 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4280 PHM_PlatformCaps_OD6PlusinACSupport) ||
4281 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4282 PHM_PlatformCaps_OD6PlusinDCSupport)) {
4283
4284 PP_ASSERT_WITH_CODE(
4285 (golden_dpm_table->mclk_table.dpm_levels
4286 [golden_dpm_table->mclk_table.count-1].value != 0),
4287 "Divide by 0!",
4288 return -1);
4289 dpm_count = dpm_table->mclk_table.count < 2 ?
4290 0 : dpm_table->mclk_table.count - 2;
4291 for (i = dpm_count; i > 1; i--) {
4292 if (mclk > golden_dpm_table->mclk_table.dpm_levels
4293 [golden_dpm_table->mclk_table.count-1].value) {
4294 clock_percent = ((mclk -
4295 golden_dpm_table->mclk_table.dpm_levels
4296 [golden_dpm_table->mclk_table.count-1].value) * 100) /
4297 golden_dpm_table->mclk_table.dpm_levels
4298 [golden_dpm_table->mclk_table.count-1].value;
4299
4300 dpm_table->mclk_table.dpm_levels[i].value =
4301 golden_dpm_table->mclk_table.dpm_levels[i].value +
4302 (golden_dpm_table->mclk_table.dpm_levels[i].value *
4303 clock_percent) / 100;
4304
4305 } else if (golden_dpm_table->mclk_table.dpm_levels
4306 [dpm_table->mclk_table.count-1].value > mclk) {
4307 clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
4308 [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
4309 golden_dpm_table->mclk_table.dpm_levels
4310 [golden_dpm_table->mclk_table.count-1].value;
4311
4312 dpm_table->mclk_table.dpm_levels[i].value =
4313 golden_dpm_table->mclk_table.dpm_levels[i].value -
4314 (golden_dpm_table->mclk_table.dpm_levels[i].value *
4315 clock_percent) / 100;
4316 } else
4317 dpm_table->mclk_table.dpm_levels[i].value =
4318 golden_dpm_table->mclk_table.dpm_levels[i].value;
4319 }
4320 }
4321 }
4322
4323 if (data->need_update_smu7_dpm_table &
4324 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
4325 result = fiji_populate_all_memory_levels(hwmgr);
4326 PP_ASSERT_WITH_CODE((0 == result),
4327 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4328 return result);
4329 }
4330
4331 if (data->need_update_smu7_dpm_table &
4332 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
4333 /*populate MCLK dpm table to SMU7 */
4334 result = fiji_populate_all_memory_levels(hwmgr);
4335 PP_ASSERT_WITH_CODE((0 == result),
4336 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4337 return result);
4338 }
4339
4340 return result;
4341}
4342
4343static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4344 struct fiji_single_dpm_table * dpm_table,
4345 uint32_t low_limit, uint32_t high_limit)
4346{
4347 uint32_t i;
4348
4349 for (i = 0; i < dpm_table->count; i++) {
4350 if ((dpm_table->dpm_levels[i].value < low_limit) ||
4351 (dpm_table->dpm_levels[i].value > high_limit))
4352 dpm_table->dpm_levels[i].enabled = false;
4353 else
4354 dpm_table->dpm_levels[i].enabled = true;
4355 }
4356 return 0;
4357}
4358
4359static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
4360 const struct fiji_power_state *fiji_ps)
4361{
4362 int result = 0;
4363 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4364 uint32_t high_limit_count;
4365
4366 PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
4367 "power state did not have any performance level",
4368 return -1);
4369
4370 high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
4371
4372 fiji_trim_single_dpm_states(hwmgr,
4373 &(data->dpm_table.sclk_table),
4374 fiji_ps->performance_levels[0].engine_clock,
4375 fiji_ps->performance_levels[high_limit_count].engine_clock);
4376
4377 fiji_trim_single_dpm_states(hwmgr,
4378 &(data->dpm_table.mclk_table),
4379 fiji_ps->performance_levels[0].memory_clock,
4380 fiji_ps->performance_levels[high_limit_count].memory_clock);
4381
4382 return result;
4383}
4384
4385static int fiji_generate_dpm_level_enable_mask(
4386 struct pp_hwmgr *hwmgr, const void *input)
4387{
4388 int result;
4389 const struct phm_set_power_state_input *states =
4390 (const struct phm_set_power_state_input *)input;
4391 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4392 const struct fiji_power_state *fiji_ps =
4393 cast_const_phw_fiji_power_state(states->pnew_state);
4394
4395 result = fiji_trim_dpm_states(hwmgr, fiji_ps);
4396 if (result)
4397 return result;
4398
4399 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4400 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4401 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4402 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4403 data->last_mclk_dpm_enable_mask =
4404 data->dpm_level_enable_mask.mclk_dpm_enable_mask;
4405
4406 if (data->uvd_enabled) {
4407 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4408 data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4409 }
4410
4411 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4412 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4413
4414 return 0;
4415}
4416
4417int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4418{
4419 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
4420 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
4421 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
4422}
4423
4424int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
4425{
4426 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4427 PPSMC_MSG_VCEDPM_Enable :
4428 PPSMC_MSG_VCEDPM_Disable);
4429}
4430
4431int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
4432{
4433 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4434 PPSMC_MSG_SAMUDPM_Enable :
4435 PPSMC_MSG_SAMUDPM_Disable);
4436}
4437
4438int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
4439{
4440 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4441 PPSMC_MSG_ACPDPM_Enable :
4442 PPSMC_MSG_ACPDPM_Disable);
4443}
4444
/* Program the SMC's UVD boot level and enable/disable UVD DPM.
 *
 * @bgate: true when UVD is being power-gated; in that case only the
 *         DPM-disable message is sent.  On un-gate the boot level is
 *         written into the SMC DPM table first.
 */
int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot from the highest multimedia dependency level. */
		data->smc_state_table.UvdBootLevel = 0;
		if (table_info->mm_dep_table->count > 0)
			data->smc_state_table.UvdBootLevel =
					(uint8_t) (table_info->mm_dep_table->count - 1);
		/* UvdBootLevel occupies bits 31:24 of its dword-aligned
		 * word (see the 0x00FFFFFF mask / << 24 below); round the
		 * byte offset down to the containing dword and RMW it.
		 */
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0x00FFFFFF;
		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Pin the enabled-level mask to the boot level when UVD DPM
		 * is not supported or a stable power state is requested.
		 */
		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDPM) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_UVDDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
	}

	return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
}
4479
4480int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
4481{
4482 const struct phm_set_power_state_input *states =
4483 (const struct phm_set_power_state_input *)input;
4484 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4485 const struct fiji_power_state *fiji_nps =
4486 cast_const_phw_fiji_power_state(states->pnew_state);
4487 const struct fiji_power_state *fiji_cps =
4488 cast_const_phw_fiji_power_state(states->pcurrent_state);
4489
4490 uint32_t mm_boot_level_offset, mm_boot_level_value;
4491 struct phm_ppt_v1_information *table_info =
4492 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4493
4494 if (fiji_nps->vce_clks.evclk >0 &&
4495 (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
4496 data->smc_state_table.VceBootLevel =
4497 (uint8_t) (table_info->mm_dep_table->count - 1);
4498
4499 mm_boot_level_offset = data->dpm_table_start +
4500 offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
4501 mm_boot_level_offset /= 4;
4502 mm_boot_level_offset *= 4;
4503 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
4504 CGS_IND_REG__SMC, mm_boot_level_offset);
4505 mm_boot_level_value &= 0xFF00FFFF;
4506 mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
4507 cgs_write_ind_register(hwmgr->device,
4508 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4509
4510 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4511 PHM_PlatformCaps_StablePState)) {
4512 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4513 PPSMC_MSG_VCEDPM_SetEnabledMask,
4514 (uint32_t)1 << data->smc_state_table.VceBootLevel);
4515
4516 fiji_enable_disable_vce_dpm(hwmgr, true);
4517 } else if (fiji_nps->vce_clks.evclk == 0 &&
4518 fiji_cps != NULL &&
4519 fiji_cps->vce_clks.evclk > 0)
4520 fiji_enable_disable_vce_dpm(hwmgr, false);
4521 }
4522
4523 return 0;
4524}
4525
/* Program the SMC's SAMU boot level and enable/disable SAMU DPM.
 *
 * @bgate: true when SAMU is being power-gated; then only the
 *         DPM-disable message is sent.
 */
int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot from the highest multimedia dependency level. */
		data->smc_state_table.SamuBootLevel =
				(uint8_t) (table_info->mm_dep_table->count - 1);
		/* SamuBootLevel occupies bits 7:0 of its dword-aligned
		 * word; round down to the containing dword and RMW it.
		 */
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0xFFFFFF00;
		mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Pin the enabled-level mask to the boot level for stable
		 * power states.
		 */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SAMUDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.SamuBootLevel));
	}

	return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
}
4556
/* Program the SMC's ACP boot level and enable/disable ACP DPM.
 *
 * @bgate: true when ACP is being power-gated; then only the
 *         DPM-disable message is sent.
 */
int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot from the highest multimedia dependency level. */
		data->smc_state_table.AcpBootLevel =
				(uint8_t) (table_info->mm_dep_table->count - 1);
		/* AcpBootLevel occupies bits 15:8 of its dword-aligned
		 * word; round down to the containing dword and RMW it.
		 */
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0xFFFF00FF;
		mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Pin the enabled-level mask to the boot level for stable
		 * power states.
		 */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_ACPDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.AcpBootLevel));
	}

	return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
}
4587
/* Push the arbiter's low-SCLK interrupt threshold to the SMC's DPM
 * table, but only when it differs from the last value written.
 */
static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	int result = 0;
	uint32_t low_sclk_interrupt_threshold = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification)
		&& (hwmgr->gfx_arbiter.sclk_threshold !=
				data->low_sclk_interrupt_threshold)) {
		data->low_sclk_interrupt_threshold =
				hwmgr->gfx_arbiter.sclk_threshold;
		low_sclk_interrupt_threshold =
				data->low_sclk_interrupt_threshold;

		/* Convert to SMC byte order before writing to SMC SRAM. */
		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);

		result = fiji_copy_bytes_to_smc(
				hwmgr->smumgr,
				data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable,
						LowSclkInterruptThreshold),
				(uint8_t *)&low_sclk_interrupt_threshold,
				sizeof(uint32_t),
				data->sram_end);
	}

	return result;
}
4618
4619static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
4620{
4621 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4622
4623 if (data->need_update_smu7_dpm_table &
4624 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4625 return fiji_program_memory_timing_parameters(hwmgr);
4626
4627 return 0;
4628}
4629
4630static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4631{
4632 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4633
4634 if (0 == data->need_update_smu7_dpm_table)
4635 return 0;
4636
4637 if ((0 == data->sclk_dpm_key_disabled) &&
4638 (data->need_update_smu7_dpm_table &
4639 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4640
4641 PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
4642 "Trying to Unfreeze SCLK DPM when DPM is disabled",);
4643 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4644 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4645 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4646 return -1);
4647 }
4648
4649 if ((0 == data->mclk_dpm_key_disabled) &&
4650 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4651
4652 PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
4653 "Trying to Unfreeze MCLK DPM when DPM is disabled",);
4654 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4655 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4656 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4657 return -1);
4658 }
4659
4660 data->need_update_smu7_dpm_table = 0;
4661
4662 return 0;
4663}
4664
/* Look up the voltage based on DAL's requested level and send the
 * requested VDDC voltage to the SMC.
 *
 * Not implemented for Fiji yet; intentionally a no-op.
 */
static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
{
}
4672
/* Send the cached SCLK and MCLK DPM level-enable masks to the SMC
 * (after applying DAL's minimum-voltage request).  Masks of zero are
 * skipped; a failed SMC message returns -1.
 */
int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
{
	int result;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/* Apply minimum voltage based on DAL's request level */
	fiji_apply_dal_minimum_voltage_request(hwmgr);

	if (0 == data->sclk_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this,
		 * we should skip this message.
		 */
		if (!fiji_is_dpm_running(hwmgr))
			printk(KERN_ERR "[ powerplay ] "
					"Trying to set Enable Mask when DPM is disabled \n");

		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
			PP_ASSERT_WITH_CODE((0 == result),
					"Set Sclk Dpm enable Mask failed", return -1);
		}
	}

	if (0 == data->mclk_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this,
		 * we should skip this message.
		 */
		if (!fiji_is_dpm_running(hwmgr))
			printk(KERN_ERR "[ powerplay ]"
					" Trying to set Enable Mask when DPM is disabled \n");

		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.mclk_dpm_enable_mask);
			PP_ASSERT_WITH_CODE((0 == result),
					"Set Mclk Dpm enable Mask failed", return -1);
		}
	}

	return 0;
}
4717
/* After a state change, issue the deferred PCIe (PSPP) performance
 * request for the new state's maximum link speed via ACPI.
 */
static int fiji_notify_link_speed_change_after_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	const struct fiji_power_state *fiji_ps =
			cast_const_phw_fiji_power_state(states->pnew_state);
	uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
	uint8_t request;

	if (data->pspp_notify_required) {
		/* Map the target link speed onto an ACPI PCIe perf request. */
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		/* Skip the Gen1 request when the link is already above Gen1. */
		if(request == PCIE_PERF_REQ_GEN1 &&
				fiji_get_current_pcie_speed(hwmgr) > 0)
			return 0;

		/* Failure here is only logged; the state change itself stands. */
		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
			else
				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
		}
	}

	return 0;
}
4751
/* Top-level power-state-set sequence (hwmgr->power_state_set hook):
 * locate the new state's clocks in the DPM tables, optionally request
 * a PCIe link-speed change, freeze DPM, upload the updated SCLK/MCLK
 * levels and enable masks, update VCE DPM and the SCLK threshold,
 * reprogram memory timing, unfreeze DPM, and finally notify the PCIe
 * link-speed change.  Each step's failure is recorded but the sequence
 * continues; the first failing step's code is returned.
 *
 * NOTE(review): the step order matters (freeze before table upload,
 * unfreeze after) — do not reorder.
 */
static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;

	tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			fiji_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = fiji_update_vce_dpm(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update VCE DPM!",
			result = tmp_result);

	tmp_result = fiji_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = fiji_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program memory timing parameters!",
			result = tmp_result);

	tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			fiji_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}

	return result;
}
4821
4822static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
4823{
4824 struct pp_power_state *ps;
4825 struct fiji_power_state *fiji_ps;
4826
4827 if (hwmgr == NULL)
4828 return -EINVAL;
4829
4830 ps = hwmgr->request_ps;
4831
4832 if (ps == NULL)
4833 return -EINVAL;
4834
4835 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
4836
4837 if (low)
4838 return fiji_ps->performance_levels[0].engine_clock;
4839 else
4840 return fiji_ps->performance_levels
4841 [fiji_ps->performance_level_count-1].engine_clock;
4842}
4843
4844static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
4845{
4846 struct pp_power_state *ps;
4847 struct fiji_power_state *fiji_ps;
4848
4849 if (hwmgr == NULL)
4850 return -EINVAL;
4851
4852 ps = hwmgr->request_ps;
4853
4854 if (ps == NULL)
4855 return -EINVAL;
4856
4857 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
4858
4859 if (low)
4860 return fiji_ps->performance_levels[0].memory_clock;
4861 else
4862 return fiji_ps->performance_levels
4863 [fiji_ps->performance_level_count-1].memory_clock;
4864}
4865
/* Print the live SCLK/MCLK (queried from the SMC) and GPU load to a
 * seq_file.  ("perforce" is a historical typo for "performance"; the
 * name is kept because it matches the pp_hwmgr_func table entry.)
 */
static void fiji_print_current_perforce_level(
		struct pp_hwmgr *hwmgr, struct seq_file *m)
{
	uint32_t sclk, mclk, activity_percent = 0;
	uint32_t offset;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/* Each frequency query leaves its result in SMC_MSG_ARG_0. */
	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);

	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);

	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	/* Values are divided by 100 to print MHz, i.e. the SMC reports
	 * in 10 kHz units.
	 */
	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n",
			mclk / 100, sclk / 100);

	offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	/* +0x80 then >>8: round-to-nearest — presumably the register is
	 * 8.8 fixed point; TODO confirm against the SMU73 soft-register spec.
	 */
	activity_percent += 0x80;
	activity_percent >>= 8;

	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
}
4890
/* Program the display-gap registers and the SMC's pre-VBlank /
 * VBlank-timeout soft registers from the active display configuration.
 */
static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	/* With no displays active, the display gap is ignored entirely. */
	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, (num_active_displays > 0)?
			DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	/* Fall back to 60 Hz to avoid dividing by zero below. */
	if (refresh_rate == 0)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* Time before vertical blank, minus a 200 us margin. */
	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			offsetof(SMU73_SoftRegisters, VBlankTimeout),
			(frame_time_in_us - pre_vbi_time_in_us));

	/* NOTE(review): the SMC is told about the display change only in
	 * the single-display case — confirm this is intentional for
	 * multi-display configurations.
	 */
	if (num_active_displays == 1)
		tonga_notify_smc_display_change(hwmgr, true);

	return 0;
}
4944
/* hwmgr->display_config_changed hook: delegates to
 * fiji_program_display_gap to reprogram the display-gap registers.
 */
int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return fiji_program_display_gap(hwmgr);
}
4949
4950static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
4951 uint16_t us_max_fan_pwm)
4952{
4953 hwmgr->thermal_controller.
4954 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4955
4956 if (phm_is_hw_access_blocked(hwmgr))
4957 return 0;
4958
4959 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4960 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4961}
4962
4963static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
4964 uint16_t us_max_fan_rpm)
4965{
4966 hwmgr->thermal_controller.
4967 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4968
4969 if (phm_is_hw_access_blocked(hwmgr))
4970 return 0;
4971
4972 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4973 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4974}
4975
/* CGS IRQ-source handler: enable or disable the thermal interrupt for
 * the given trip direction by read-modify-writing CG_THERMAL_INT.
 *
 * NOTE(review): "enabled" SETS the THERM_INTH/INTL *_MASK bit and
 * "disabled" clears it; for a field named "mask" that polarity looks
 * inverted — confirm against the register spec.  Also note the
 * CG_THERMAL_INT_CTRL__* mask macros are applied to ixCG_THERMAL_INT;
 * verify the bit layouts of the two registers match.
 */
int fiji_dpm_set_interrupt_state(void *private_data,
					 unsigned src_id, unsigned type,
					 int enabled)
{
	uint32_t cg_thermal_int;
	struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (type) {
	case AMD_THERMAL_IRQ_LOW_TO_HIGH:
		if (enabled) {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			cgs_write_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		} else {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			cgs_write_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		}
		break;

	case AMD_THERMAL_IRQ_HIGH_TO_LOW:
		if (enabled) {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			cgs_write_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		} else {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			cgs_write_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		}
		break;
	default:
		break;
	}
	return 0;
}
5023
/* Register CGS interrupt sources for the internal thermal interrupt.
 * Source IDs 230 and 231 are hardware interrupt sources (presumably
 * the thermal low-to-high and high-to-low trip events — confirm
 * against the IH source-ID list); both route through
 * fiji_dpm_set_interrupt_state.
 */
int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
					const void *thermal_interrupt_info)
{
	int result;
	const struct pp_interrupt_registration_info *info =
			(const struct pp_interrupt_registration_info *)
			thermal_interrupt_info;

	if (info == NULL)
		return -EINVAL;

	result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
			fiji_dpm_set_interrupt_state,
			info->call_back, info->context);

	if (result)
		return -EINVAL;

	result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
			fiji_dpm_set_interrupt_state,
			info->call_back, info->context);

	if (result)
		return -EINVAL;

	return 0;
}
5051
5052static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5053{
5054 if (mode) {
5055 /* stop auto-manage */
5056 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5057 PHM_PlatformCaps_MicrocodeFanControl))
5058 fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
5059 fiji_fan_ctrl_set_static_mode(hwmgr, mode);
5060 } else
5061 /* restart auto-manage */
5062 fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
5063
5064 return 0;
5065}
5066
5067static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5068{
5069 if (hwmgr->fan_ctrl_is_in_default_mode)
5070 return hwmgr->fan_ctrl_default_mode;
5071 else
5072 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
5073 CG_FDO_CTRL2, FDO_PWM_MODE);
5074}
5075
/* Fiji hwmgr dispatch table.  Several entry points (backend_fini,
 * powerplay-table entry counting, display-config notification) are
 * shared with the Tonga implementation.
 */
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
	.backend_init = &fiji_hwmgr_backend_init,
	.backend_fini = &tonga_hwmgr_backend_fini,
	.asic_setup = &fiji_setup_asic_task,
	.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
	.force_dpm_level = &fiji_dpm_force_dpm_level,
	.get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
	.get_power_state_size = &fiji_get_power_state_size,
	.get_pp_table_entry = &fiji_get_pp_table_entry,
	.patch_boot_state = &fiji_patch_boot_state,
	.apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
	.power_state_set = &fiji_set_power_state_tasks,
	.get_sclk = &fiji_dpm_get_sclk,
	.get_mclk = &fiji_dpm_get_mclk,
	.print_current_perforce_level = &fiji_print_current_perforce_level,
	.powergate_uvd = &fiji_phm_powergate_uvd,
	.powergate_vce = &fiji_phm_powergate_vce,
	.disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
	.notify_smc_display_config_after_ps_adjustment =
			&tonga_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = &fiji_display_configuration_changed_task,
	.set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
	.get_temperature = fiji_thermal_get_temperature,
	.stop_thermal_controller = fiji_thermal_stop_thermal_controller,
	.get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
	.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
	.set_fan_control_mode = fiji_set_fan_control_mode,
	.get_fan_control_mode = fiji_get_fan_control_mode,
};
5112
5113int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
5114{
5115 struct fiji_hwmgr *data;
5116 int ret = 0;
5117
5118 data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
5119 if (data == NULL)
5120 return -ENOMEM;
5121
5122 hwmgr->backend = data;
5123 hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
5124 hwmgr->pptable_func = &tonga_pptable_funcs;
5125 pp_fiji_thermal_initialize(hwmgr);
5126 return ret;
5127}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
new file mode 100644
index 000000000000..22e273b1c1c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -0,0 +1,361 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _FIJI_HWMGR_H_
25#define _FIJI_HWMGR_H_
26
27#include "hwmgr.h"
28#include "smu73.h"
29#include "smu73_discrete.h"
30#include "ppatomctrl.h"
31#include "fiji_ppsmc.h"
32
/* Number of hardware performance levels a Fiji power state can carry */
#define FIJI_MAX_HARDWARE_POWERLEVELS	2
/* Default activity target used for DPM tuning (percent) */
#define FIJI_AT_DFLT	30

/* Voltage-control methods for VDDC/VDDCI/MVDD regulators */
#define FIJI_VOLTAGE_CONTROL_NONE		0x0
#define FIJI_VOLTAGE_CONTROL_BY_GPIO		0x1
#define FIJI_VOLTAGE_CONTROL_BY_SVID2		0x2
#define FIJI_VOLTAGE_CONTROL_MERGED		0x3

/* Dirty flags marking which DPM tables must be re-uploaded to the SMC
 * (OD_* variants are the overdriven values).  Presumably stored in
 * fiji_hwmgr::need_update_smu7_dpm_table -- confirm against users.
 */
#define DPMTABLE_OD_UPDATE_SCLK		0x00000001
#define DPMTABLE_OD_UPDATE_MCLK		0x00000002
#define DPMTABLE_UPDATE_SCLK		0x00000004
#define DPMTABLE_UPDATE_MCLK		0x00000008

/* One engine/memory clock operating point plus its PCIe settings */
struct fiji_performance_level {
	uint32_t memory_clock;
	uint32_t engine_clock;
	uint16_t pcie_gen;
	uint16_t pcie_lane;
};

/* UVD (video decode block) clock pair */
struct fiji_uvd_clocks {
	uint32_t vclk;	/* video clock */
	uint32_t dclk;	/* decode clock */
};

/* VCE (video encode block) clock pair */
struct fiji_vce_clocks {
	uint32_t evclk;	/* encode video clock */
	uint32_t ecclk;	/* encode core clock */
};

/* Driver-side representation of one Fiji power state */
struct fiji_power_state {
	uint32_t magic;	/* state identification tag */
	struct fiji_uvd_clocks uvd_clks;
	struct fiji_vce_clocks vce_clks;
	uint32_t sam_clk;
	uint32_t acp_clk;
	uint16_t performance_level_count;
	bool dc_compatible;	/* state usable on DC (battery) power */
	uint32_t sclk_threshold;
	struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
};

/* A single entry of a per-domain DPM level table */
struct fiji_dpm_level {
	bool enabled;
	uint32_t value;		/* clock or voltage value of this level */
	uint32_t param1;	/* auxiliary per-level parameter */
};
80
#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID	5
/* Maximum entries in a regular (non-PCIe) DPM level table */
#define MAX_REGULAR_DPM_NUMBER		8
#define FIJI_MINIMUM_ENGINE_CLOCK	2500

/* DPM table for a single domain (sclk, mclk, voltage, ...) */
struct fiji_single_dpm_table {
	uint32_t count;		/* number of valid entries in dpm_levels */
	struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};

/* Full set of per-domain DPM tables */
struct fiji_dpm_table {
	struct fiji_single_dpm_table sclk_table;
	struct fiji_single_dpm_table mclk_table;
	struct fiji_single_dpm_table pcie_speed_table;
	struct fiji_single_dpm_table vddc_table;
	struct fiji_single_dpm_table vddci_table;
	struct fiji_single_dpm_table mvdd_table;
};

/* Cached clock/PLL register values; the "v" prefix marks a register
 * value snapshot rather than a register offset.
 */
struct fiji_clock_registers {
	uint32_t vCG_SPLL_FUNC_CNTL;
	uint32_t vCG_SPLL_FUNC_CNTL_2;
	uint32_t vCG_SPLL_FUNC_CNTL_3;
	uint32_t vCG_SPLL_FUNC_CNTL_4;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t vDLL_CNTL;
	uint32_t vMCLK_PWRMGT_CNTL;
	uint32_t vMPLL_AD_FUNC_CNTL;
	uint32_t vMPLL_DQ_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL_1;
	uint32_t vMPLL_FUNC_CNTL_2;
	uint32_t vMPLL_SS1;
	uint32_t vMPLL_SS2;
};

/* Cached voltage SMIO register value */
struct fiji_voltage_smio_registers {
	uint32_t vS0_VID_LOWER_SMIO_CNTL;
};

#define FIJI_MAX_LEAKAGE_COUNT 8
/* Mapping of virtual leakage-voltage IDs to the actual voltages */
struct fiji_leakage_voltage {
	uint16_t count;		/* number of valid pairs below */
	uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT];
	uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
};

/* Clock/voltage/PCIe values as programmed by the VBIOS at boot */
struct fiji_vbios_boot_state {
	uint16_t mvdd_bootup_value;
	uint16_t vddc_bootup_value;
	uint16_t vddci_bootup_value;
	uint32_t sclk_bootup_value;
	uint32_t mclk_bootup_value;
	uint16_t pcie_gen_bootup_value;
	uint16_t pcie_lane_bootup_value;
};

/* BACO (Bus Active, Chip Off) bookkeeping */
struct fiji_bacos {
	uint32_t best_match;
	uint32_t baco_flags;
	struct fiji_performance_level performance_level;
};

/* Ultra Low Voltage parameter structure */
struct fiji_ulv_parm {
	bool ulv_supported;
	uint32_t cg_ulv_parameter;
	uint32_t ulv_volt_change_delay;
	struct fiji_performance_level ulv_power_level;
};

/* Display constraints that feed into DPM decisions */
struct fiji_display_timing {
	uint32_t min_clock_in_sr;	/* minimum clock while in self-refresh */
	uint32_t num_existing_displays;
};

/* Per-domain enable masks for SMC DPM levels */
struct fiji_dpmlevel_enable_mask {
	uint32_t uvd_dpm_enable_mask;
	uint32_t vce_dpm_enable_mask;
	uint32_t acp_dpm_enable_mask;
	uint32_t samu_dpm_enable_mask;
	uint32_t sclk_dpm_enable_mask;
	uint32_t mclk_dpm_enable_mask;
	uint32_t pcie_dpm_enable_mask;
};

/* Min/max range of a PCIe gen or lane setting */
struct fiji_pcie_perf_range {
	uint16_t max;
	uint16_t min;
};
171
/* Driver-private Fiji power-management state, stored in hwmgr->backend */
struct fiji_hwmgr {
	struct fiji_dpm_table dpm_table;
	/* reference copy of dpm_table -- presumably the unmodified
	 * defaults kept for restoring after overdrive; confirm users */
	struct fiji_dpm_table golden_dpm_table;

	uint32_t voting_rights_clients0;
	uint32_t voting_rights_clients1;
	uint32_t voting_rights_clients2;
	uint32_t voting_rights_clients3;
	uint32_t voting_rights_clients4;
	uint32_t voting_rights_clients5;
	uint32_t voting_rights_clients6;
	uint32_t voting_rights_clients7;
	uint32_t static_screen_threshold_unit;
	uint32_t static_screen_threshold;
	uint32_t voltage_control;	/* one of FIJI_VOLTAGE_CONTROL_* */
	uint32_t vddc_vddci_delta;

	uint32_t active_auto_throttle_sources;

	struct fiji_clock_registers clock_registers;
	struct fiji_voltage_smio_registers voltage_smio_registers;

	bool is_memory_gddr5;
	uint16_t acpi_vddc;
	bool pspp_notify_required;
	uint16_t force_pcie_gen;
	uint16_t acpi_pcie_gen;
	uint32_t pcie_gen_cap;
	uint32_t pcie_lane_cap;
	uint32_t pcie_spc_cap;
	struct fiji_leakage_voltage vddc_leakage;
	struct fiji_leakage_voltage Vddci_leakage;

	uint32_t mvdd_control;
	uint32_t vddc_mask_low;
	uint32_t mvdd_mask_low;
	uint16_t max_vddc_in_pptable;
	uint16_t min_vddc_in_pptable;
	uint16_t max_vddci_in_pptable;
	uint16_t min_vddci_in_pptable;
	uint32_t mclk_strobe_mode_threshold;
	uint32_t mclk_stutter_mode_threshold;
	uint32_t mclk_edc_enable_threshold;
	uint32_t mclk_edcwr_enable_threshold;
	bool is_uvd_enabled;
	struct fiji_vbios_boot_state vbios_boot_state;

	bool battery_state;	/* running on DC (battery) power */
	bool is_tlu_enabled;

	/* ---- SMC SRAM Address of firmware header tables ---- */
	uint32_t sram_end;
	uint32_t dpm_table_start;
	uint32_t soft_regs_start;
	uint32_t mc_reg_table_start;
	uint32_t fan_table_start;
	uint32_t arb_table_start;
	/* host-side staging copies of tables uploaded to the SMC */
	struct SMU73_Discrete_DpmTable smc_state_table;
	struct SMU73_Discrete_Ulv ulv_setting;

	/* ---- Stuff originally coming from Evergreen ---- */
	uint32_t vddci_control;
	struct pp_atomctrl_voltage_table vddc_voltage_table;
	struct pp_atomctrl_voltage_table vddci_voltage_table;
	struct pp_atomctrl_voltage_table mvdd_voltage_table;

	uint32_t mgcg_cgtt_local2;
	uint32_t mgcg_cgtt_local3;
	uint32_t gpio_debug;
	uint32_t mc_micro_code_feature;
	uint32_t highest_mclk;
	uint16_t acpi_vddci;
	uint8_t mvdd_high_index;
	uint8_t mvdd_low_index;
	bool dll_default_on;
	bool performance_request_registered;

	/* ---- Low Power Features ---- */
	struct fiji_bacos bacos;
	struct fiji_ulv_parm ulv;

	/* ---- CAC Stuff ---- */
	uint32_t cac_table_start;
	bool cac_configuration_required;
	bool driver_calculate_cac_leakage;
	bool cac_enabled;

	/* ---- DPM2 Parameters ---- */
	uint32_t power_containment_features;	/* POWERCONTAINMENT_FEATURE_* bits */
	bool enable_dte_feature;
	bool enable_tdc_limit_feature;
	bool enable_pkg_pwr_tracking_feature;
	bool disable_uvd_power_tune_feature;
	struct fiji_pt_defaults *power_tune_defaults;	/* selected parameter set */
	struct SMU73_Discrete_PmFuses power_tune_table;	/* staged PM fuse image */
	uint32_t dte_tj_offset;
	uint32_t fast_watermark_threshold;

	/* ---- Phase Shedding ---- */
	bool vddc_phase_shed_control;

	/* ---- DI/DT ---- */
	struct fiji_display_timing display_timing;

	/* ---- Thermal Temperature Setting ---- */
	struct fiji_dpmlevel_enable_mask dpm_level_enable_mask;
	uint32_t need_update_smu7_dpm_table;	/* presumably DPMTABLE_* flags */
	uint32_t sclk_dpm_key_disabled;
	uint32_t mclk_dpm_key_disabled;
	uint32_t pcie_dpm_key_disabled;
	uint32_t min_engine_clocks;
	struct fiji_pcie_perf_range pcie_gen_performance;
	struct fiji_pcie_perf_range pcie_lane_performance;
	struct fiji_pcie_perf_range pcie_gen_power_saving;
	struct fiji_pcie_perf_range pcie_lane_power_saving;
	bool use_pcie_performance_levels;
	bool use_pcie_power_saving_levels;
	uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
	uint32_t mclk_activity_target;
	uint32_t mclk_dpm0_activity_target;
	uint32_t low_sclk_interrupt_threshold;
	uint32_t last_mclk_dpm_enable_mask;
	bool uvd_enabled;

	/* ---- Power Gating States ---- */
	bool uvd_power_gated;
	bool vce_power_gated;
	bool samu_power_gated;
	bool acp_power_gated;
	bool pg_acp_init;
	bool frtc_enabled;
	bool frtc_status_changed;
};
305
/* To convert to Q8.8 fixed-point format for firmware (multiply by 256) */
#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256

/* I2C line identifiers used by the thermal controller's sensor routing */
enum Fiji_I2CLineID {
	Fiji_I2CLineID_DDC1 = 0x90,
	Fiji_I2CLineID_DDC2 = 0x91,
	Fiji_I2CLineID_DDC3 = 0x92,
	Fiji_I2CLineID_DDC4 = 0x93,
	Fiji_I2CLineID_DDC5 = 0x94,
	Fiji_I2CLineID_DDC6 = 0x95,
	Fiji_I2CLineID_SCLSDA = 0x96,
	Fiji_I2CLineID_DDCVGA = 0x97
};

/* SCL/SDA pin numbers for each I2C line above
 * (see get_scl_sda_value() in fiji_powertune.c)
 */
#define Fiji_I2C_DDC1DATA 0
#define Fiji_I2C_DDC1CLK 1
#define Fiji_I2C_DDC2DATA 2
#define Fiji_I2C_DDC2CLK 3
#define Fiji_I2C_DDC3DATA 4
#define Fiji_I2C_DDC3CLK 5
#define Fiji_I2C_SDA 40
#define Fiji_I2C_SCL 41
#define Fiji_I2C_DDC4DATA 65
#define Fiji_I2C_DDC4CLK 66
#define Fiji_I2C_DDC5DATA 0x48
#define Fiji_I2C_DDC5CLK 0x49
#define Fiji_I2C_DDC6DATA 0x4a
#define Fiji_I2C_DDC6CLK 0x4b
#define Fiji_I2C_DDCVGADATA 0x4c
#define Fiji_I2C_DDCVGACLK 0x4d

#define FIJI_UNUSED_GPIO_PIN 0x7F

/* Helpers shared with the Tonga hwmgr implementation */
extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);

/* Host <-> SMC conversion helpers: the macros byte-swap to/from
 * big-endian, as shown by the cpu_to_be*/be*_to_cpu calls.
 * The CONVERT_* variants swap their argument IN PLACE.
 */
#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)

#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)

#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))

#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))

#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
new file mode 100644
index 000000000000..6efcb2bac45f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -0,0 +1,553 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "smumgr.h"
26#include "fiji_hwmgr.h"
27#include "fiji_powertune.h"
28#include "fiji_smumgr.h"
29#include "smu73_discrete.h"
30#include "pp_debug.h"
31
#define VOLTAGE_SCALE  4
/* Number of entries in fiji_power_tune_data_set_array */
#define POWERTUNE_DEFAULT_SET_MAX     1

/* Default power-tune parameter sets; an entry is selected by the
 * pptable's 1-based usPowerTuneDataSetID (see
 * fiji_initialize_power_tune_defaults()).
 */
struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
		/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
		{1, 0xF, 0xFD,
		/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
		0x19, 5, 45}
};
41
/**
 * fiji_initialize_power_tune_defaults - select power-tune parameter set
 * @hwmgr: hardware manager
 *
 * Picks an entry of fiji_power_tune_data_set_array based on the
 * pptable's usPowerTuneDataSetID (falling back to entry 0), then sets
 * up the power-containment related platform caps and feature flags.
 */
void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t tmp = 0;

	/* A non-zero, in-range data-set ID selects a table entry (IDs are
	 * 1-based); anything else falls back to the first entry.
	 */
	if(table_info &&
			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
			table_info->cac_dtp_table->usPowerTuneDataSetID)
		fiji_hwmgr->power_tune_defaults =
				&fiji_power_tune_data_set_array
				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
	else
		fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];

	/* Assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	/* tmp is still 0 here, so dte_tj_offset defaults to 0 */
	fiji_hwmgr->dte_tj_offset = tmp;

	if (!tmp) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerContainment);

		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CAC);

		fiji_hwmgr->fast_watermark_threshold = 100;

		/* tmp is forced to 1 below: DTE is left disabled while the
		 * TDC limit and package power tracking are enabled.
		 * NOTE(review): looks like a placeholder for a real
		 * fuse/register read -- confirm intent.
		 */
		tmp = 1;
		fiji_hwmgr->enable_dte_feature = tmp ? false : true;
		fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
		fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
	}
}
89
/* PPGen emits fan-gain settings in units of x * 100; the SMC firmware
 * expects x * 4096 (0x1000).  Rescale accordingly.
 */
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	uint32_t scaled = (uint32_t)raw_setting * 4096 / 100;

	return (uint16_t)scaled;
}
100
101static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
102{
103 switch (line) {
104 case Fiji_I2CLineID_DDC1 :
105 *scl = Fiji_I2C_DDC1CLK;
106 *sda = Fiji_I2C_DDC1DATA;
107 break;
108 case Fiji_I2CLineID_DDC2 :
109 *scl = Fiji_I2C_DDC2CLK;
110 *sda = Fiji_I2C_DDC2DATA;
111 break;
112 case Fiji_I2CLineID_DDC3 :
113 *scl = Fiji_I2C_DDC3CLK;
114 *sda = Fiji_I2C_DDC3DATA;
115 break;
116 case Fiji_I2CLineID_DDC4 :
117 *scl = Fiji_I2C_DDC4CLK;
118 *sda = Fiji_I2C_DDC4DATA;
119 break;
120 case Fiji_I2CLineID_DDC5 :
121 *scl = Fiji_I2C_DDC5CLK;
122 *sda = Fiji_I2C_DDC5DATA;
123 break;
124 case Fiji_I2CLineID_DDC6 :
125 *scl = Fiji_I2C_DDC6CLK;
126 *sda = Fiji_I2C_DDC6DATA;
127 break;
128 case Fiji_I2CLineID_SCLSDA :
129 *scl = Fiji_I2C_SCL;
130 *sda = Fiji_I2C_SDA;
131 break;
132 case Fiji_I2CLineID_DDCVGA :
133 *scl = Fiji_I2C_DDCVGACLK;
134 *sda = Fiji_I2C_DDCVGADATA;
135 break;
136 default:
137 *scl = 0;
138 *sda = 0;
139 break;
140 }
141}
142
/**
 * fiji_populate_bapm_parameters_in_dpm_table - fill BAPM thermal/fan fields
 * @hwmgr: hardware manager
 *
 * Copies TDP limits, temperature limits, fan gains and thermal I2C
 * routing from the pptable into the staged SMC DPM table, converting
 * values into the fixed-point, big-endian formats the SMC expects.
 *
 * Return: always 0 (the range check only logs).
 */
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_pt_defaults *defaults = data->power_tune_defaults;
	SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	struct pp_advance_fan_control_parameters *fan_table=
			&hwmgr->thermal_controller.advanceFanControlParameters;
	uint8_t uc_scl, uc_sda;

	/* TDP number of fraction bits are changed from 8 to 7 for Fiji
	 * as requested by SMC team (hence * 128, not * 256)
	 */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usTDP * 128));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usTDP * 128));

	/* GpuTjMax is a single byte, so the temperature must fit */
	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
			"Target Operating Temp is out of Range!",);

	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;

	/* The following are for new Fiji Multi-input fan/thermal control;
	 * temperature limits are sent as Q8.8 (hence * 256)
	 */
	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTargetOperatingTemp * 256);
	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitHotspot * 256);
	dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitLiquid1 * 256);
	dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitLiquid2 * 256);
	dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitVrVddc * 256);
	dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitVrMvdd * 256);
	dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitPlx * 256);

	/* Fan gains are rescaled from x*100 to the SMC's x*4096 unit */
	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainEdge));
	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHotspot));
	dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainLiquid));
	dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainVrVddc));
	dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
	dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainPlx));
	dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHbm));

	/* I2C addresses and SCL/SDA routing of the external sensors */
	dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
	dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
	dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
	dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;

	get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
	dpm_table->Liquid_I2C_LineSCL = uc_scl;
	dpm_table->Liquid_I2C_LineSDA = uc_sda;

	get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
	dpm_table->Vr_I2C_LineSCL = uc_scl;
	dpm_table->Vr_I2C_LineSDA = uc_sda;

	get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
	dpm_table->Plx_I2C_LineSCL = uc_scl;
	dpm_table->Plx_I2C_LineSDA = uc_sda;

	return 0;
}
221
222static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
223{
224 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
225 struct fiji_pt_defaults *defaults = data->power_tune_defaults;
226
227 data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
228 data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
229 data->power_tune_table.SviLoadLineTrimVddC = 3;
230 data->power_tune_table.SviLoadLineOffsetVddC = 0;
231
232 return 0;
233}
234
/* Program the VDDC TDC limit fields of the PM fuse table from the
 * pptable and the selected power-tune defaults.
 */
static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct fiji_pt_defaults *defaults = data->power_tune_defaults;

	/* TDC number of fraction bits are changed from 8 to 7
	 * for Fiji as requested by SMC team (hence * 128)
	 */
	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
	/* CONVERT_FROM_HOST_TO_SMC_US byte-swaps tdc_limit IN PLACE and
	 * also yields the swapped value used for the assignment.
	 */
	data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
	data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;

	return 0;
}
255
/* Populate DW8 of the PM fuse table: TdcWaterfallCtl comes from the
 * power-tune defaults; the LPML temperature min/max and reserved byte
 * are taken from the value read back from SMC SRAM.
 */
static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_pt_defaults *defaults = data->power_tune_defaults;
	uint32_t temp;

	/* NOTE(review): the error text says DW6/SviLoadLineEn, but the
	 * read targets TdcWaterfallCtl (DW8) -- message looks stale.
	 */
	if (fiji_read_smc_sram_dword(hwmgr->smumgr,
			fuse_table_offset +
			offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, data->sram_end))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else {
		data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
		data->power_tune_table.LPMLTemperatureMin =
				(uint8_t)((temp >> 16) & 0xff);
		data->power_tune_table.LPMLTemperatureMax =
				(uint8_t)((temp >> 8) & 0xff);
		data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
	}
	return 0;
}
279
280static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
281{
282 int i;
283 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
284
285 /* Currently not used. Set all to zero. */
286 for (i = 0; i < 16; i++)
287 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
288
289 return 0;
290}
291
292static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
293{
294 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
295
296 if( (hwmgr->thermal_controller.advanceFanControlParameters.
297 usFanOutputSensitivity & (1 << 15)) ||
298 0 == hwmgr->thermal_controller.advanceFanControlParameters.
299 usFanOutputSensitivity )
300 hwmgr->thermal_controller.advanceFanControlParameters.
301 usFanOutputSensitivity = hwmgr->thermal_controller.
302 advanceFanControlParameters.usDefaultFanOutputSensitivity;
303
304 data->power_tune_table.FuzzyFan_PwmSetDelta =
305 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
306 advanceFanControlParameters.usFanOutputSensitivity);
307 return 0;
308}
309
310static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
311{
312 int i;
313 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
314
315 /* Currently not used. Set all to zero. */
316 for (i = 0; i < 16; i++)
317 data->power_tune_table.GnbLPML[i] = 0;
318
319 return 0;
320}
321
/* Intended to derive GnbLPML min/max VIDs from the BapmVddCVid tables;
 * the implementation is commented out, so this is currently a no-op
 * that always succeeds.
 */
static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	/* int i, min, max;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;

	min = max = pHiVID[0];
	for (i = 0; i < 8; i++) {
		if (0 != pHiVID[i]) {
			if (min > pHiVID[i])
				min = pHiVID[i];
			if (max < pHiVID[i])
				max = pHiVID[i];
		}

		if (0 != pLoVID[i]) {
			if (min > pLoVID[i])
				min = pLoVID[i];
			if (max < pLoVID[i])
				max = pLoVID[i];
		}
	}

	PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
	data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
	data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
*/
	return 0;
}
352
353static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
354{
355 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
356 struct phm_ppt_v1_information *table_info =
357 (struct phm_ppt_v1_information *)(hwmgr->pptable);
358 uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
359 uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
360 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
361
362 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
363 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
364
365 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
366 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
367 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
368 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
369
370 return 0;
371}
372
/**
 * fiji_populate_pm_fuses - build and upload the PM fuse table to the SMC
 * @hwmgr: hardware manager
 *
 * If power containment is enabled, fills each dword group of the
 * SMU73_Discrete_PmFuses structure in turn and copies the finished
 * table into SMC SRAM at the offset published in the firmware header.
 * A no-op when power containment is disabled.
 *
 * Return: 0 on success, -EINVAL on any SMC read/populate/copy failure.
 */
int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Locate the PM fuse table inside SMC SRAM */
		if (fiji_read_smc_sram_dword(hwmgr->smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU73_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		/* DW6 */
		if (fiji_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);
		/* DW7 */
		if (fiji_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		/* DW9-DW12 */
		if (0 != fiji_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		/* DW13-DW14 */
		if(fiji_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate Fuzzy Fan Control parameters Failed!",
					return -EINVAL);

		/* DW15-DW18 */
		if (fiji_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		/* DW19 */
		if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Min and Max Vid Failed!",
					return -EINVAL);

		/* DW20 */
		if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
					"Sidd Failed!", return -EINVAL);

		/* Upload the assembled table into SMC SRAM */
		if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&data->power_tune_table,
				sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
443
444int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
445{
446 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
447 int result = 0;
448
449 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
450 PHM_PlatformCaps_CAC)) {
451 int smc_result;
452 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
453 (uint16_t)(PPSMC_MSG_EnableCac));
454 PP_ASSERT_WITH_CODE((0 == smc_result),
455 "Failed to enable CAC in SMC.", result = -1);
456
457 data->cac_enabled = (0 == smc_result) ? true : false;
458 }
459 return result;
460}
461
462int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
463{
464 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
465
466 if(data->power_containment_features &
467 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
468 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
469 PPSMC_MSG_PkgPwrSetLimit, n);
470 return 0;
471}
472
/* Send the overdrive target TDP value to the SMC. */
static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
{
	return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
			PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
478
/**
 * fiji_enable_power_containment - turn on the power-containment features
 * @hwmgr: hardware manager
 *
 * For each enabled feature flag (DTE, TDC limit, package power
 * tracking) sends the corresponding enable message to the SMC and
 * records success in data->power_containment_features.  When package
 * power tracking comes up, also programs the default power limit from
 * the pptable.
 *
 * Return: 0 if all requested features were enabled, -1 if any SMC
 * message failed (remaining features are still attempted).
 */
int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int smc_result;
	int result = 0;

	data->power_containment_features = 0;
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (data->enable_dte_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_EnableDTE));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable DTE in SMC.", result = -1;);
			if (0 == smc_result)
				data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
		}

		if (data->enable_tdc_limit_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable TDCLimit in SMC.", result = -1;);
			if (0 == smc_result)
				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_TDCLimit;
		}

		if (data->enable_pkg_pwr_tracking_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
			PP_ASSERT_WITH_CODE((0 == smc_result),
					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
			if (0 == smc_result) {
				struct phm_cac_tdp_table *cac_table =
						table_info->cac_dtp_table;
				/* limit is sent in Q8.8, hence * 256 */
				uint32_t default_limit =
					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);

				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_PkgPwrLimit;

				if (fiji_set_power_limit(hwmgr, default_limit))
					printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
			}
		}
	}
	return result;
}
530
/**
 * fiji_power_control_set_level - apply the TDP adjustment policy
 * @hwmgr: hardware manager
 *
 * Computes the adjusted target TDP from the pptable's base TDP and the
 * platform's TDP adjustment percentage (signed by
 * TDPAdjustmentPolarity) and sends it to the SMC.  A no-op when power
 * containment is disabled.
 *
 * Return: 0 on success or when skipped, otherwise the SMC message
 * result from fiji_set_overdriver_target_tdp().
 */
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
	int adjust_percent, target_tdp;
	int result = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* adjustment percentage has already been validated */
		adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
				hwmgr->platform_descriptor.TDPAdjustment :
				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
		/* SMC requested that target_tdp to be 7 bit fraction in DPM table
		 * but message to be 8 bit fraction for messages
		 * (hence * 256 here rather than * 128)
		 */
		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
		result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
	}

	return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
new file mode 100644
index 000000000000..55e58200f33a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
#ifndef FIJI_POWERTUNE_H
#define FIJI_POWERTUNE_H

/*
 * Target address space of a powertune config-register access.
 * Used to tell the writer how to reach the register (direct MMIO,
 * SMC indirect, DIDT indirect) or to only cache the value.
 */
enum fiji_pt_config_reg_type {
	FIJI_CONFIGREG_MMR = 0,
	FIJI_CONFIGREG_SMC_IND,
	FIJI_CONFIGREG_DIDT_IND,
	FIJI_CONFIGREG_CACHE,
	FIJI_CONFIGREG_MAX
};

/* PowerContainment Features (bits accumulated in
 * fiji_hwmgr::power_containment_features as each feature is enabled) */
#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004

/* One register/field update in a powertune configuration sequence. */
struct fiji_pt_config_reg {
	uint32_t                           offset;
	uint32_t                           mask;
	uint32_t                           shift;
	uint32_t                           value;
	enum fiji_pt_config_reg_type       type;
};

/* Per-SKU powertune defaults; field names mirror the SMC fuse table. */
struct fiji_pt_defaults
{
	uint8_t   SviLoadLineEn;
	uint8_t   SviLoadLineVddC;
	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
	uint8_t   TDC_MAWt;
	uint8_t   TdcWaterfallCtl;
	uint8_t   DTEAmbientTempBase;
};

void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int  fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int  fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int  fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
int  fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
int  fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int  fiji_power_control_set_level(struct pp_hwmgr *hwmgr);

#endif /* FIJI_POWERTUNE_H */
66
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
new file mode 100644
index 000000000000..e76a7de9aa32
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
@@ -0,0 +1,687 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <asm/div64.h>
24#include "fiji_thermal.h"
25#include "fiji_hwmgr.h"
26#include "fiji_smumgr.h"
27#include "fiji_ppsmc.h"
28#include "smu/smu_7_1_3_d.h"
29#include "smu/smu_7_1_3_sh_mask.h"
30
31int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
32 struct phm_fan_speed_info *fan_speed_info)
33{
34
35 if (hwmgr->thermal_controller.fanInfo.bNoFan)
36 return 0;
37
38 fan_speed_info->supports_percent_read = true;
39 fan_speed_info->supports_percent_write = true;
40 fan_speed_info->min_percent = 0;
41 fan_speed_info->max_percent = 100;
42
43 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
44 PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
45 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
46 fan_speed_info->supports_rpm_read = true;
47 fan_speed_info->supports_rpm_write = true;
48 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
49 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
50 } else {
51 fan_speed_info->min_rpm = 0;
52 fan_speed_info->max_rpm = 0;
53 }
54
55 return 0;
56}
57
58int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
59 uint32_t *speed)
60{
61 uint32_t duty100;
62 uint32_t duty;
63 uint64_t tmp64;
64
65 if (hwmgr->thermal_controller.fanInfo.bNoFan)
66 return 0;
67
68 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
69 CG_FDO_CTRL1, FMAX_DUTY100);
70 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
71 CG_THERMAL_STATUS, FDO_PWM_DUTY);
72
73 if (duty100 == 0)
74 return -EINVAL;
75
76
77 tmp64 = (uint64_t)duty * 100;
78 do_div(tmp64, duty100);
79 *speed = (uint32_t)tmp64;
80
81 if (*speed > 100)
82 *speed = 100;
83
84 return 0;
85}
86
/**
 * fiji_fan_ctrl_get_fan_speed_rpm - read the current fan speed in RPM.
 * @hwmgr: the address of the powerplay hardware manager.
 * @speed: out; computed RPM.
 *
 * Returns 0 when there is no fan or no tachometer (leaving *speed
 * untouched), -EINVAL when the tach period reads as zero.
 */
int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
	uint32_t tach_period;
	uint32_t crystal_clock_freq;

	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
			(hwmgr->thermal_controller.fanInfo.
				ucTachometerPulsesPerRevolution == 0))
		return 0;

	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_TACH_STATUS, TACH_PERIOD);

	if (tach_period == 0)
		return -EINVAL;

	crystal_clock_freq = tonga_get_xclk(hwmgr);

	/*
	 * NOTE(review): the inverse conversion in
	 * fiji_fan_ctrl_set_fan_speed_rpm() uses an extra factor of 8
	 * (tach_period = 60 * xclk * 10000 / (8 * speed)); the two are not
	 * mutual inverses — confirm the correct factor against the
	 * TACH_PERIOD register units.
	 */
	*speed = 60 * crystal_clock_freq * 10000/ tach_period;

	return 0;
}
109
110/**
111* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
112* @param hwmgr the address of the powerplay hardware manager.
113* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
114* @exception Should always succeed.
115*/
116int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
117{
118
119 if (hwmgr->fan_ctrl_is_in_default_mode) {
120 hwmgr->fan_ctrl_default_mode =
121 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
122 CG_FDO_CTRL2, FDO_PWM_MODE);
123 hwmgr->tmin =
124 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
125 CG_FDO_CTRL2, TMIN);
126 hwmgr->fan_ctrl_is_in_default_mode = false;
127 }
128
129 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
130 CG_FDO_CTRL2, TMIN, 0);
131 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
132 CG_FDO_CTRL2, FDO_PWM_MODE, mode);
133
134 return 0;
135}
136
137/**
138* Reset Fan Speed Control to default mode.
139* @param hwmgr the address of the powerplay hardware manager.
140* @exception Should always succeed.
141*/
142int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
143{
144 if (!hwmgr->fan_ctrl_is_in_default_mode) {
145 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
146 CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
147 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
148 CG_FDO_CTRL2, TMIN, hwmgr->tmin);
149 hwmgr->fan_ctrl_is_in_default_mode = true;
150 }
151
152 return 0;
153}
154
/**
 * fiji_fan_ctrl_start_smc_fan_control - hand fan control over to the SMC.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Starts SMC fan control in fuzzy mode when the platform supports it
 * (also pushing the max RPM/PWM limit through the hwmgr_func callbacks),
 * otherwise in table mode.  Optionally programs the fan temperature
 * target.  Returns the result of the last SMC message.
 */
int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
	int result;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
		/* Argument register selects the fuzzy control algorithm. */
		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);

		/* NOTE(review): the set_max_fan_*_output callbacks are invoked
		 * without a NULL check — presumably always populated for this
		 * asic; confirm against the hwmgr_func table. */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_FanSpeedInTableIsRPM))
			hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
					hwmgr->thermal_controller.
					advanceFanControlParameters.usMaxFanRPM);
		else
			hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
					hwmgr->thermal_controller.
					advanceFanControlParameters.usMaxFanPWM);

	} else {
		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
	}

	/* A target temperature of 0 means "not configured". */
	if (!result && hwmgr->thermal_controller.
			advanceFanControlParameters.ucTargetTemperature)
		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanTemperatureTarget,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucTargetTemperature);

	return result;
}
188
189
/* Tell the SMC firmware to stop managing the fan. */
int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
194
195/**
196* Set Fan Speed in percent.
197* @param hwmgr the address of the powerplay hardware manager.
198* @param speed is the percentage value (0% - 100%) to be set.
199* @exception Fails is the 100% setting appears to be 0.
200*/
201int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
202 uint32_t speed)
203{
204 uint32_t duty100;
205 uint32_t duty;
206 uint64_t tmp64;
207
208 if (hwmgr->thermal_controller.fanInfo.bNoFan)
209 return 0;
210
211 if (speed > 100)
212 speed = 100;
213
214 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_MicrocodeFanControl))
216 fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
217
218 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
219 CG_FDO_CTRL1, FMAX_DUTY100);
220
221 if (duty100 == 0)
222 return -EINVAL;
223
224 tmp64 = (uint64_t)speed * 100;
225 do_div(tmp64, duty100);
226 duty = (uint32_t)tmp64;
227
228 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
229 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
230
231 return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
232}
233
234/**
235* Reset Fan Speed to default.
236* @param hwmgr the address of the powerplay hardware manager.
237* @exception Always succeeds.
238*/
239int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
240{
241 int result;
242
243 if (hwmgr->thermal_controller.fanInfo.bNoFan)
244 return 0;
245
246 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_MicrocodeFanControl)) {
248 result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
249 if (!result)
250 result = fiji_fan_ctrl_start_smc_fan_control(hwmgr);
251 } else
252 result = fiji_fan_ctrl_set_default_mode(hwmgr);
253
254 return result;
255}
256
/**
 * fiji_fan_ctrl_set_fan_speed_rpm - set the fan speed in RPM.
 * @hwmgr: the address of the powerplay hardware manager.
 * @speed: the RPM value to be set; must lie within the fan's min/max range.
 *
 * Silently returns 0 (no change) when there is no fan, no tachometer,
 * or the requested speed is out of range.
 */
int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
	uint32_t tach_period;
	uint32_t crystal_clock_freq;

	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
			(hwmgr->thermal_controller.fanInfo.
			ucTachometerPulsesPerRevolution == 0) ||
			(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
			(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
		return 0;

	crystal_clock_freq = tonga_get_xclk(hwmgr);

	/*
	 * NOTE(review): the factor of 8 here has no counterpart in
	 * fiji_fan_ctrl_get_fan_speed_rpm(), so set followed by get does not
	 * round-trip — confirm the intended TACH_PERIOD scaling.
	 */
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_TACH_STATUS, TACH_PERIOD, tach_period);

	return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
284
285/**
286* Reads the remote temperature from the SIslands thermal controller.
287*
288* @param hwmgr The address of the hardware manager.
289*/
290int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
291{
292 int temp;
293
294 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
295 CG_MULT_THERMAL_STATUS, CTF_TEMP);
296
297 /* Bit 9 means the reading is lower than the lowest usable value. */
298 if (temp & 0x200)
299 temp = FIJI_THERMAL_MAXIMUM_TEMP_READING;
300 else
301 temp = temp & 0x1ff;
302
303 temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
304
305 return temp;
306}
307
308/**
309* Set the requested temperature range for high and low alert signals
310*
311* @param hwmgr The address of the hardware manager.
312* @param range Temperature range to be programmed for high and low alert signals
313* @exception PP_Result_BadInput if the input data is not valid.
314*/
315static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
316 uint32_t low_temp, uint32_t high_temp)
317{
318 uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP *
319 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
320 uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP *
321 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
322
323 if (low < low_temp)
324 low = low_temp;
325 if (high > high_temp)
326 high = high_temp;
327
328 if (low > high)
329 return -EINVAL;
330
331 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
332 CG_THERMAL_INT, DIG_THERM_INTH,
333 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
334 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
335 CG_THERMAL_INT, DIG_THERM_INTL,
336 (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
337 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
338 CG_THERMAL_CTRL, DIG_THERM_DPM,
339 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
340
341 return 0;
342}
343
344/**
345* Programs thermal controller one-time setting registers
346*
347* @param hwmgr The address of the hardware manager.
348*/
349static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
350{
351 if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
352 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
353 CG_TACH_CTRL, EDGE_PER_REV,
354 hwmgr->thermal_controller.fanInfo.
355 ucTachometerPulsesPerRevolution - 1);
356
357 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
358 CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
359
360 return 0;
361}
362
363/**
364* Enable thermal alerts on the RV770 thermal controller.
365*
366* @param hwmgr The address of the hardware manager.
367*/
368static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
369{
370 uint32_t alert;
371
372 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
373 CG_THERMAL_INT, THERM_INT_MASK);
374 alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
375 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
376 CG_THERMAL_INT, THERM_INT_MASK, alert);
377
378 /* send message to SMU to enable internal thermal interrupts */
379 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
380}
381
382/**
383* Disable thermal alerts on the RV770 thermal controller.
384* @param hwmgr The address of the hardware manager.
385*/
386static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
387{
388 uint32_t alert;
389
390 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
391 CG_THERMAL_INT, THERM_INT_MASK);
392 alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
393 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
394 CG_THERMAL_INT, THERM_INT_MASK, alert);
395
396 /* send message to SMU to disable internal thermal interrupts */
397 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
398}
399
400/**
401* Uninitialize the thermal controller.
402* Currently just disables alerts.
403* @param hwmgr The address of the hardware manager.
404*/
405int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
406{
407 int result = fiji_thermal_disable_alert(hwmgr);
408
409 if (hwmgr->thermal_controller.fanInfo.bNoFan)
410 fiji_fan_ctrl_set_default_mode(hwmgr);
411
412 return result;
413}
414
/**
 * tf_fiji_thermal_setup_fan_table - build and upload the SMC fan table.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param pInput the pointer to input data (unused)
 * @param pOutput the pointer to output data (unused)
 * @param pStorage the pointer to temporary storage (unused)
 * @param Result the last failure code (unused)
 * @return always 0; on any failure the MicrocodeFanControl cap is
 *         unset instead, so the driver falls back to manual control.
 */
int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	/* No fan table location in SMC RAM: give up on microcode control. */
	if (data->fan_table_start == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_FDO_CTRL1, FMAX_DUTY100);

	if (duty100 == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Minimum duty: usPWMMin is in hundredths of a percent (/10000). */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
			usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/* Temperature spans of the two ramp segments (min->med, med->high).
	 * NOTE(review): a zero span would divide by zero below — presumably
	 * ruled out by table validation; confirm. */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* Duty-per-degree slopes, with +50 for round-to-nearest on /100. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* All multi-byte SMC table fields are big-endian. */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->
			thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = tonga_get_xclk(hwmgr);

	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
			thermal_controller.advanceFanControlParameters.ulCycleDelay *
			reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	/* Mirror the temperature source currently selected in hardware. */
	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
			hwmgr->device, CGS_IND_REG__SMC,
			CG_MULT_THERMAL_CTRL, TEMP_SEL);

	res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
			data->sram_end);

	/* Optional minimum PWM and minimum SCLK limits; 0 means unset. */
	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ucMinimumPWMLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanMinPwm,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucMinimumPWMLimit);

	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanSclkTarget,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);

	/* Any failure: fall back by disabling microcode fan control. */
	if (res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);

	return 0;
}
526
527/**
528* Start the fan control on the SMC.
529* @param hwmgr the address of the powerplay hardware manager.
530* @param pInput the pointer to input data
531* @param pOutput the pointer to output data
532* @param pStorage the pointer to temporary storage
533* @param Result the last failure code
534* @return result from set temperature range routine
535*/
536int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
537 void *input, void *output, void *storage, int result)
538{
539/* If the fantable setup has failed we could have disabled
540 * PHM_PlatformCaps_MicrocodeFanControl even after
541 * this function was included in the table.
542 * Make sure that we still think controlling the fan is OK.
543*/
544 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
545 PHM_PlatformCaps_MicrocodeFanControl)) {
546 fiji_fan_ctrl_start_smc_fan_control(hwmgr);
547 fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
548 }
549
550 return 0;
551}
552
553/**
554* Set temperature range for high and low alerts
555* @param hwmgr the address of the powerplay hardware manager.
556* @param pInput the pointer to input data
557* @param pOutput the pointer to output data
558* @param pStorage the pointer to temporary storage
559* @param Result the last failure code
560* @return result from set temperature range routine
561*/
562int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
563 void *input, void *output, void *storage, int result)
564{
565 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
566
567 if (range == NULL)
568 return -EINVAL;
569
570 return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max);
571}
572
/**
 * tf_fiji_thermal_initialize - table entry wrapping fiji_thermal_initialize().
 * @param hwmgr the address of the powerplay hardware manager.
 * @param pInput the pointer to input data (unused)
 * @param pOutput the pointer to output data (unused)
 * @param pStorage the pointer to temporary storage (unused)
 * @param Result the last failure code (unused)
 * @return result from initialize thermal controller routine
 */
int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	return fiji_thermal_initialize(hwmgr);
}
587
/**
 * tf_fiji_thermal_enable_alert - table entry wrapping fiji_thermal_enable_alert().
 * @param hwmgr the address of the powerplay hardware manager.
 * @param pInput the pointer to input data (unused)
 * @param pOutput the pointer to output data (unused)
 * @param pStorage the pointer to temporary storage (unused)
 * @param Result the last failure code (unused)
 * @return result from enable alert routine
 */
int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	return fiji_thermal_enable_alert(hwmgr);
}
602
/**
 * tf_fiji_thermal_disable_alert - table entry wrapping fiji_thermal_disable_alert().
 * @param hwmgr the address of the powerplay hardware manager.
 * @param pInput the pointer to input data (unused)
 * @param pOutput the pointer to output data (unused)
 * @param pStorage the pointer to temporary storage (unused)
 * @param Result the last failure code (unused)
 * @return result from disable alert routine
 */
static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	return fiji_thermal_disable_alert(hwmgr);
}
617
/*
 * Steps run when the thermal controller is started: one-time register
 * init, program the alert window, enable alerts, then set up and start
 * SMC fan control.  NULL tableFunction terminates the list.
 */
static struct phm_master_table_item
fiji_thermal_start_thermal_controller_master_list[] = {
	{NULL, tf_fiji_thermal_initialize},
	{NULL, tf_fiji_thermal_set_temperature_range},
	{NULL, tf_fiji_thermal_enable_alert},
/* We should restrict performance levels to low before we halt the SMC.
 * On the other hand we are still in boot state when we do this
 * so it would be pointless.
 * If this assumption changes we have to revisit this table.
 */
	{NULL, tf_fiji_thermal_setup_fan_table},
	{NULL, tf_fiji_thermal_start_smc_fan_control},
	{NULL, NULL}
};
632
/* Master table for starting the thermal controller: no temporary
 * storage, no special flags. */
static struct phm_master_table_header
fiji_thermal_start_thermal_controller_master = {
	0,
	PHM_MasterTableFlag_None,
	fiji_thermal_start_thermal_controller_master_list
};
639
/* Steps to reprogram the alert window: alerts are disabled around the
 * range update so no spurious interrupts fire mid-change. */
static struct phm_master_table_item
fiji_thermal_set_temperature_range_master_list[] = {
	{NULL, tf_fiji_thermal_disable_alert},
	{NULL, tf_fiji_thermal_set_temperature_range},
	{NULL, tf_fiji_thermal_enable_alert},
	{NULL, NULL}
};
647
/* Master table for changing the alert temperature range; non-static as
 * it is referenced from outside this file. */
struct phm_master_table_header
fiji_thermal_set_temperature_range_master = {
	0,
	PHM_MasterTableFlag_None,
	fiji_thermal_set_temperature_range_master_list
};
654
655int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
656{
657 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
658 fiji_fan_ctrl_set_default_mode(hwmgr);
659 return 0;
660}
661
662/**
663* Initializes the thermal controller related functions in the Hardware Manager structure.
664* @param hwmgr The address of the hardware manager.
665* @exception Any error code from the low-level communication.
666*/
667int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
668{
669 int result;
670
671 result = phm_construct_table(hwmgr,
672 &fiji_thermal_set_temperature_range_master,
673 &(hwmgr->set_temperature_range));
674
675 if (!result) {
676 result = phm_construct_table(hwmgr,
677 &fiji_thermal_start_thermal_controller_master,
678 &(hwmgr->start_thermal_controller));
679 if (result)
680 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
681 }
682
683 if (!result)
684 hwmgr->fan_ctrl_is_in_default_mode = true;
685 return result;
686}
687
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
new file mode 100644
index 000000000000..8621493b8574
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef FIJI_THERMAL_H
#define FIJI_THERMAL_H

#include "hwmgr.h"

/* Bits in the CG_THERMAL_INT THERM_INT_MASK field. */
#define FIJI_THERMAL_HIGH_ALERT_MASK         0x1
#define FIJI_THERMAL_LOW_ALERT_MASK          0x2

/* Limits of the 9-bit signed CTF_TEMP reading, in degrees C. */
#define FIJI_THERMAL_MINIMUM_TEMP_READING    -256
#define FIJI_THERMAL_MAXIMUM_TEMP_READING    255

/* Programmable alert window limits, in degrees C. */
#define FIJI_THERMAL_MINIMUM_ALERT_TEMP      0
#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP      255

/* FDO_PWM_MODE values accepted by fiji_fan_ctrl_set_static_mode(). */
#define FDO_PWM_MODE_STATIC  1
#define FDO_PWM_MODE_STATIC_RPM 5


/* phm runtime-table entry points (tf_ prefix). */
extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);

extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
/* Defined in the tonga hwmgr; reused here for the reference clock. */
extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);

#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
new file mode 100644
index 000000000000..9deadabbc81c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include "hwmgr.h"
27
28static int phm_run_table(struct pp_hwmgr *hwmgr,
29 struct phm_runtime_table_header *rt_table,
30 void *input,
31 void *output,
32 void *temp_storage)
33{
34 int result = 0;
35 phm_table_function *function;
36
37 for (function = rt_table->function_list; NULL != *function; function++) {
38 int tmp = (*function)(hwmgr, input, output, temp_storage, result);
39
40 if (tmp == PP_Result_TableImmediateExit)
41 break;
42 if (tmp) {
43 if (0 == result)
44 result = tmp;
45 if (rt_table->exit_error)
46 break;
47 }
48 }
49
50 return result;
51}
52
53int phm_dispatch_table(struct pp_hwmgr *hwmgr,
54 struct phm_runtime_table_header *rt_table,
55 void *input, void *output)
56{
57 int result = 0;
58 void *temp_storage = NULL;
59
60 if (hwmgr == NULL || rt_table == NULL || rt_table->function_list == NULL) {
61 printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
62 return 0; /*temp return ture because some function not implement on some asic */
63 }
64
65 if (0 != rt_table->storage_size) {
66 temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL);
67 if (temp_storage == NULL) {
68 printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n");
69 return -ENOMEM;
70 }
71 }
72
73 result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
74
75 if (NULL != temp_storage)
76 kfree(temp_storage);
77
78 return result;
79}
80
/**
 * phm_construct_table - build a runtime table from a master table.
 * @hwmgr: the hardware manager.
 * @master_table: the static description (entries plus predicates).
 * @rt_table: out; receives the allocated NULL-terminated function list,
 *            exit_error flag and storage size.
 *
 * Two passes over the master list: first count the entries whose
 * isFunctionNeededInRuntimeTable predicate (if any) accepts this hwmgr,
 * then copy them.  If a predicate gives different answers between the
 * passes the overrun is detected and -EINVAL is returned.
 * Returns 0 on success, -EINVAL on bad parameters or predicate
 * instability, -ENOMEM on allocation failure.  The caller owns the
 * resulting list and releases it with phm_destroy_table().
 */
int phm_construct_table(struct pp_hwmgr *hwmgr,
			struct phm_master_table_header *master_table,
			struct phm_runtime_table_header *rt_table)
{
	uint32_t function_count = 0;
	const struct phm_master_table_item *table_item;
	uint32_t size;
	phm_table_function *run_time_list;
	phm_table_function *rtf;

	if (hwmgr == NULL || master_table == NULL || rt_table == NULL) {
		printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
		return -EINVAL;
	}

	/* Pass 1: count the entries that apply to this hwmgr. */
	for (table_item = master_table->master_list;
		NULL != table_item->tableFunction; table_item++) {
		if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
		    (table_item->isFunctionNeededInRuntimeTable(hwmgr)))
			function_count++;
	}

	/* +1 for the NULL terminator consumed by phm_run_table(). */
	size = (function_count + 1) * sizeof(phm_table_function);
	run_time_list = kzalloc(size, GFP_KERNEL);

	if (NULL == run_time_list)
		return -ENOMEM;

	/* Pass 2: copy the applicable entries, guarding against the
	 * predicates changing their answers since pass 1. */
	rtf = run_time_list;
	for (table_item = master_table->master_list;
		NULL != table_item->tableFunction; table_item++) {
		if ((rtf - run_time_list) > function_count) {
			printk(KERN_ERR "[ powerplay ] Check function results have changed\n");
			kfree(run_time_list);
			return -EINVAL;
		}

		if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
		     (table_item->isFunctionNeededInRuntimeTable(hwmgr))) {
			*(rtf++) = table_item->tableFunction;
		}
	}

	if ((rtf - run_time_list) > function_count) {
		printk(KERN_ERR "[ powerplay ] Check function results have changed\n");
		kfree(run_time_list);
		return -EINVAL;
	}

	*rtf = NULL;
	rt_table->function_list = run_time_list;
	rt_table->exit_error = (0 != (master_table->flags & PHM_MasterTableFlag_ExitOnError));
	rt_table->storage_size = master_table->storage_size;
	return 0;
}
136
137int phm_destroy_table(struct pp_hwmgr *hwmgr,
138 struct phm_runtime_table_header *rt_table)
139{
140 if (hwmgr == NULL || rt_table == NULL) {
141 printk(KERN_ERR "[ powerplay ] Invalid Parameter\n");
142 return -EINVAL;
143 }
144
145 if (NULL == rt_table->function_list)
146 return 0;
147
148 kfree(rt_table->function_list);
149
150 rt_table->function_list = NULL;
151 rt_table->storage_size = 0;
152 rt_table->exit_error = false;
153
154 return 0;
155}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
new file mode 100644
index 000000000000..0f2d5e4bc241
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -0,0 +1,334 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/errno.h>
24#include "hwmgr.h"
25#include "hardwaremanager.h"
26#include "power_state.h"
27#include "pp_acpi.h"
28#include "amd_acpi.h"
29#include "amd_powerplay.h"
30
/* Guard macro: bail out of the enclosing function with -EINVAL unless the
 * hwmgr and its backend function table are both present. */
#define PHM_FUNC_CHECK(hw) \
	do {							\
		if ((hw) == NULL || (hw)->hwmgr_func == NULL)	\
			return -EINVAL;				\
	} while (0)
36
/*
 * Establish the default runtime platform capabilities: clear every
 * "disable" debug override, then probe ACPI for PCIe performance-request
 * support. Power gating is disabled by default here (set below).
 */
void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
{
	/* Clear clock/voltage-transition and clock-gating overrides. */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
	/* Power gating starts disabled; backends enable it when supported. */
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);

	/* PCIe performance requests need both ATCS methods to be present. */
	if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
		acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
}
65
/* Report whether direct hardware access is currently blocked for this
 * hardware manager (flag set via phm_block_hw_access()). */
bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
{
	return hwmgr->block_hw_access;
}
70
/* Set or clear the hardware-access block flag.  Always returns 0; no
 * locking here — NOTE(review): callers presumably serialize access. */
int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block)
{
	hwmgr->block_hw_access = block;
	return 0;
}
76
77int phm_setup_asic(struct pp_hwmgr *hwmgr)
78{
79 PHM_FUNC_CHECK(hwmgr);
80
81 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
82 PHM_PlatformCaps_TablelessHardwareInterface)) {
83 if (NULL != hwmgr->hwmgr_func->asic_setup)
84 return hwmgr->hwmgr_func->asic_setup(hwmgr);
85 } else {
86 return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic),
87 NULL, NULL);
88 }
89
90 return 0;
91}
92
93int phm_power_down_asic(struct pp_hwmgr *hwmgr)
94{
95 PHM_FUNC_CHECK(hwmgr);
96
97 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
98 PHM_PlatformCaps_TablelessHardwareInterface)) {
99 if (NULL != hwmgr->hwmgr_func->power_off_asic)
100 return hwmgr->hwmgr_func->power_off_asic(hwmgr);
101 } else {
102 return phm_dispatch_table(hwmgr, &(hwmgr->power_down_asic),
103 NULL, NULL);
104 }
105
106 return 0;
107}
108
109int phm_set_power_state(struct pp_hwmgr *hwmgr,
110 const struct pp_hw_power_state *pcurrent_state,
111 const struct pp_hw_power_state *pnew_power_state)
112{
113 struct phm_set_power_state_input states;
114
115 PHM_FUNC_CHECK(hwmgr);
116
117 states.pcurrent_state = pcurrent_state;
118 states.pnew_state = pnew_power_state;
119
120 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
121 PHM_PlatformCaps_TablelessHardwareInterface)) {
122 if (NULL != hwmgr->hwmgr_func->power_state_set)
123 return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
124 } else {
125 return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL);
126 }
127
128 return 0;
129}
130
131int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
132{
133 PHM_FUNC_CHECK(hwmgr);
134
135 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
136 PHM_PlatformCaps_TablelessHardwareInterface)) {
137 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
138 return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
139 } else {
140 return phm_dispatch_table(hwmgr,
141 &(hwmgr->enable_dynamic_state_management),
142 NULL, NULL);
143 }
144 return 0;
145}
146
147int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
148{
149 PHM_FUNC_CHECK(hwmgr);
150
151 if (hwmgr->hwmgr_func->force_dpm_level != NULL)
152 return hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
153
154 return 0;
155}
156
157int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
158 struct pp_power_state *adjusted_ps,
159 const struct pp_power_state *current_ps)
160{
161 PHM_FUNC_CHECK(hwmgr);
162
163 if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
164 return hwmgr->hwmgr_func->apply_state_adjust_rules(
165 hwmgr,
166 adjusted_ps,
167 current_ps);
168 return 0;
169}
170
171int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
172{
173 PHM_FUNC_CHECK(hwmgr);
174
175 if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
176 return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
177 return 0;
178}
179
180int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate)
181{
182 PHM_FUNC_CHECK(hwmgr);
183
184 if (hwmgr->hwmgr_func->powergate_uvd != NULL)
185 return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
186 return 0;
187}
188
189int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate)
190{
191 PHM_FUNC_CHECK(hwmgr);
192
193 if (hwmgr->hwmgr_func->powergate_vce != NULL)
194 return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
195 return 0;
196}
197
198int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
199{
200 PHM_FUNC_CHECK(hwmgr);
201
202 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
203 PHM_PlatformCaps_TablelessHardwareInterface)) {
204 if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
205 return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
206 } else {
207 return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL);
208 }
209 return 0;
210}
211
212int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
213{
214 PHM_FUNC_CHECK(hwmgr);
215
216 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
217 PHM_PlatformCaps_TablelessHardwareInterface)) {
218 if (NULL != hwmgr->hwmgr_func->display_config_changed)
219 hwmgr->hwmgr_func->display_config_changed(hwmgr);
220 } else
221 return phm_dispatch_table(hwmgr, &hwmgr->display_configuration_changed, NULL, NULL);
222 return 0;
223}
224
225int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
226{
227 PHM_FUNC_CHECK(hwmgr);
228
229 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_TablelessHardwareInterface))
231 if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
232 hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
233
234 return 0;
235}
236
237int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
238{
239 PHM_FUNC_CHECK(hwmgr);
240
241 if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
242 return -EINVAL;
243
244 return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
245}
246
247int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
248{
249 PHM_FUNC_CHECK(hwmgr);
250
251 if (hwmgr->hwmgr_func->register_internal_thermal_interrupt == NULL)
252 return -EINVAL;
253
254 return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
255}
256
/**
* Initializes the thermal controller subsystem.
*
* @param hwmgr the address of the powerplay hardware manager.
* @param temperature_range the address of the structure holding the temperature range.
* @return the return value from the dispatcher.
*
* NOTE(review): unlike its siblings this function does not validate hwmgr
* before use — presumably phm_dispatch_table() handles NULL; confirm.
*/
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range)
{
	return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), temperature_range, NULL);
}
268
269
/*
 * Ask the backend whether an SMC update is needed for the current display
 * configuration.
 *
 * NOTE(review): the function returns bool, but both PHM_FUNC_CHECK and
 * the missing-hook branch return -EINVAL, which a bool caller sees as
 * "true" (update required).  That is a conservative default, but the
 * error is indistinguishable from a genuine "yes" — consider a signature
 * that separates errors from the answer.
 */
bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}
279
280
281int phm_check_states_equal(struct pp_hwmgr *hwmgr,
282 const struct pp_hw_power_state *pstate1,
283 const struct pp_hw_power_state *pstate2,
284 bool *equal)
285{
286 PHM_FUNC_CHECK(hwmgr);
287
288 if (hwmgr->hwmgr_func->check_states_equal == NULL)
289 return -EINVAL;
290
291 return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1, pstate2, equal);
292}
293
294int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
295 const struct amd_pp_display_configuration *display_config)
296{
297 PHM_FUNC_CHECK(hwmgr);
298
299 if (hwmgr->hwmgr_func->store_cc6_data == NULL)
300 return -EINVAL;
301
302 hwmgr->display_config = *display_config;
303 /* to do pass other display configuration in furture */
304
305 if (hwmgr->hwmgr_func->store_cc6_data)
306 hwmgr->hwmgr_func->store_cc6_data(hwmgr,
307 display_config->cpu_pstate_separation_time,
308 display_config->cpu_cc6_disable,
309 display_config->cpu_pstate_disable,
310 display_config->nb_pstate_switch_disable);
311
312 return 0;
313}
314
315int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
316 struct amd_pp_dal_clock_info *info)
317{
318 PHM_FUNC_CHECK(hwmgr);
319
320 if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
321 return -EINVAL;
322
323 return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
324}
325
326int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
327{
328 PHM_FUNC_CHECK(hwmgr);
329
330 if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
331 return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);
332
333 return 0;
334}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
new file mode 100644
index 000000000000..5fb98aa2e719
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -0,0 +1,563 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "linux/delay.h"
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/slab.h>
27#include "cgs_common.h"
28#include "power_state.h"
29#include "hwmgr.h"
30#include "pppcielanes.h"
31#include "pp_debug.h"
32#include "ppatomctrl.h"
33
34extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
35extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
36extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
37
38int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
39{
40 struct pp_hwmgr *hwmgr;
41
42 if ((handle == NULL) || (pp_init == NULL))
43 return -EINVAL;
44
45 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46 if (hwmgr == NULL)
47 return -ENOMEM;
48
49 handle->hwmgr = hwmgr;
50 hwmgr->smumgr = handle->smu_mgr;
51 hwmgr->device = pp_init->device;
52 hwmgr->chip_family = pp_init->chip_family;
53 hwmgr->chip_id = pp_init->chip_id;
54 hwmgr->hw_revision = pp_init->rev_id;
55 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
56 hwmgr->power_source = PP_PowerSource_AC;
57
58 switch (hwmgr->chip_family) {
59 case AMD_FAMILY_CZ:
60 cz_hwmgr_init(hwmgr);
61 break;
62 case AMD_FAMILY_VI:
63 switch (hwmgr->chip_id) {
64 case CHIP_TONGA:
65 tonga_hwmgr_init(hwmgr);
66 break;
67 case CHIP_FIJI:
68 fiji_hwmgr_init(hwmgr);
69 break;
70 default:
71 return -EINVAL;
72 }
73 break;
74 default:
75 return -EINVAL;
76 }
77
78 phm_init_dynamic_caps(hwmgr);
79
80 return 0;
81}
82
83int hwmgr_fini(struct pp_hwmgr *hwmgr)
84{
85 if (hwmgr == NULL || hwmgr->ps == NULL)
86 return -EINVAL;
87
88 kfree(hwmgr->ps);
89 kfree(hwmgr);
90 return 0;
91}
92
93int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
94{
95 int result;
96 unsigned int i;
97 unsigned int table_entries;
98 struct pp_power_state *state;
99 int size;
100
101 if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
102 return -EINVAL;
103
104 if (hwmgr->hwmgr_func->get_power_state_size == NULL)
105 return -EINVAL;
106
107 hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
108
109 hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
110 sizeof(struct pp_power_state);
111
112 hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
113
114 if (hwmgr->ps == NULL)
115 return -ENOMEM;
116
117 state = hwmgr->ps;
118
119 for (i = 0; i < table_entries; i++) {
120 result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
121
122 if (state->classification.flags & PP_StateClassificationFlag_Boot) {
123 hwmgr->boot_ps = state;
124 hwmgr->current_ps = hwmgr->request_ps = state;
125 }
126
127 state->id = i + 1; /* assigned unique num for every power state id */
128
129 if (state->classification.flags & PP_StateClassificationFlag_Uvd)
130 hwmgr->uvd_ps = state;
131 state = (struct pp_power_state *)((unsigned long)state + size);
132 }
133
134 return 0;
135}
136
137
138/**
139 * Returns once the part of the register indicated by the mask has
140 * reached the given value.
141 */
142int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
143 uint32_t value, uint32_t mask)
144{
145 uint32_t i;
146 uint32_t cur_value;
147
148 if (hwmgr == NULL || hwmgr->device == NULL) {
149 printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
150 return -EINVAL;
151 }
152
153 for (i = 0; i < hwmgr->usec_timeout; i++) {
154 cur_value = cgs_read_register(hwmgr->device, index);
155 if ((cur_value & mask) == (value & mask))
156 break;
157 udelay(1);
158 }
159
160 /* timeout means wrong logic*/
161 if (i == hwmgr->usec_timeout)
162 return -1;
163 return 0;
164}
165
166int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
167 uint32_t index, uint32_t value, uint32_t mask)
168{
169 uint32_t i;
170 uint32_t cur_value;
171
172 if (hwmgr == NULL || hwmgr->device == NULL) {
173 printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
174 return -EINVAL;
175 }
176
177 for (i = 0; i < hwmgr->usec_timeout; i++) {
178 cur_value = cgs_read_register(hwmgr->device, index);
179 if ((cur_value & mask) != (value & mask))
180 break;
181 udelay(1);
182 }
183
184 /* timeout means wrong logic*/
185 if (i == hwmgr->usec_timeout)
186 return -1;
187 return 0;
188}
189
190
191/**
192 * Returns once the part of the register indicated by the mask has
193 * reached the given value.The indirect space is described by giving
194 * the memory-mapped index of the indirect index register.
195 */
196void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
197 uint32_t indirect_port,
198 uint32_t index,
199 uint32_t value,
200 uint32_t mask)
201{
202 if (hwmgr == NULL || hwmgr->device == NULL) {
203 printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
204 return;
205 }
206
207 cgs_write_register(hwmgr->device, indirect_port, index);
208 phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
209}
210
/*
 * Indirect-space counterpart of phm_wait_for_register_unequal(): select
 * the register through the index port, then poll the data port
 * (indirect_port + 1) until the masked bits differ from value.
 */
void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
					uint32_t indirect_port,
					uint32_t index,
					uint32_t value,
					uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL) {
		printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
		return;
	}

	cgs_write_register(hwmgr->device, indirect_port, index);
	phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
				      value, mask);
}
226
/* True when the platform capability for UVD power gating is enabled. */
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
}
231
/* True when the platform capability for VCE power gating is enabled. */
bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
}
236
237
238int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
239{
240 uint32_t i, j;
241 uint16_t vvalue;
242 bool found = false;
243 struct pp_atomctrl_voltage_table *table;
244
245 PP_ASSERT_WITH_CODE((NULL != vol_table),
246 "Voltage Table empty.", return -EINVAL);
247
248 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
249 GFP_KERNEL);
250
251 if (NULL == table)
252 return -EINVAL;
253
254 table->mask_low = vol_table->mask_low;
255 table->phase_delay = vol_table->phase_delay;
256
257 for (i = 0; i < vol_table->count; i++) {
258 vvalue = vol_table->entries[i].value;
259 found = false;
260
261 for (j = 0; j < table->count; j++) {
262 if (vvalue == table->entries[j].value) {
263 found = true;
264 break;
265 }
266 }
267
268 if (!found) {
269 table->entries[table->count].value = vvalue;
270 table->entries[table->count].smio_low =
271 vol_table->entries[i].smio_low;
272 table->count++;
273 }
274 }
275
276 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
277 kfree(table);
278
279 return 0;
280}
281
282int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
283 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
284{
285 uint32_t i;
286 int result;
287
288 PP_ASSERT_WITH_CODE((0 != dep_table->count),
289 "Voltage Dependency Table empty.", return -EINVAL);
290
291 PP_ASSERT_WITH_CODE((NULL != vol_table),
292 "vol_table empty.", return -EINVAL);
293
294 vol_table->mask_low = 0;
295 vol_table->phase_delay = 0;
296 vol_table->count = dep_table->count;
297
298 for (i = 0; i < dep_table->count; i++) {
299 vol_table->entries[i].value = dep_table->entries[i].mvdd;
300 vol_table->entries[i].smio_low = 0;
301 }
302
303 result = phm_trim_voltage_table(vol_table);
304 PP_ASSERT_WITH_CODE((0 == result),
305 "Failed to trim MVDD table.", return result);
306
307 return 0;
308}
309
310int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
311 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
312{
313 uint32_t i;
314 int result;
315
316 PP_ASSERT_WITH_CODE((0 != dep_table->count),
317 "Voltage Dependency Table empty.", return -EINVAL);
318
319 PP_ASSERT_WITH_CODE((NULL != vol_table),
320 "vol_table empty.", return -EINVAL);
321
322 vol_table->mask_low = 0;
323 vol_table->phase_delay = 0;
324 vol_table->count = dep_table->count;
325
326 for (i = 0; i < dep_table->count; i++) {
327 vol_table->entries[i].value = dep_table->entries[i].vddci;
328 vol_table->entries[i].smio_low = 0;
329 }
330
331 result = phm_trim_voltage_table(vol_table);
332 PP_ASSERT_WITH_CODE((0 == result),
333 "Failed to trim VDDCI table.", return result);
334
335 return 0;
336}
337
338int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
339 phm_ppt_v1_voltage_lookup_table *lookup_table)
340{
341 int i = 0;
342
343 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
344 "Voltage Lookup Table empty.", return -EINVAL);
345
346 PP_ASSERT_WITH_CODE((NULL != vol_table),
347 "vol_table empty.", return -EINVAL);
348
349 vol_table->mask_low = 0;
350 vol_table->phase_delay = 0;
351
352 vol_table->count = lookup_table->count;
353
354 for (i = 0; i < vol_table->count; i++) {
355 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
356 vol_table->entries[i].smio_low = 0;
357 }
358
359 return 0;
360}
361
362void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
363 struct pp_atomctrl_voltage_table *vol_table)
364{
365 unsigned int i, diff;
366
367 if (vol_table->count <= max_vol_steps)
368 return;
369
370 diff = vol_table->count - max_vol_steps;
371
372 for (i = 0; i < max_vol_steps; i++)
373 vol_table->entries[i] = vol_table->entries[i + diff];
374
375 vol_table->count = max_vol_steps;
376
377 return;
378}
379
380int phm_reset_single_dpm_table(void *table,
381 uint32_t count, int max)
382{
383 int i;
384
385 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
386
387 PP_ASSERT_WITH_CODE(count <= max,
388 "Fatal error, can not set up single DPM table entries to exceed max number!",
389 );
390
391 dpm_table->count = count;
392 for (i = 0; i < max; i++)
393 dpm_table->dpm_level[i].enabled = false;
394
395 return 0;
396}
397
/*
 * Fill one PCIe DPM table entry: value carries the PCIe gen, param1 the
 * lane count; the level is marked enabled.  No bounds check on index —
 * callers must stay within the dpm_level array.
 */
void phm_setup_pcie_table_entry(
	void *table,
	uint32_t index, uint32_t pcie_gen,
	uint32_t pcie_lanes)
{
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
	dpm_table->dpm_level[index].value = pcie_gen;
	dpm_table->dpm_level[index].param1 = pcie_lanes;
	dpm_table->dpm_level[index].enabled = 1;
}
408
409int32_t phm_get_dpm_level_enable_mask_value(void *table)
410{
411 int32_t i;
412 int32_t mask = 0;
413 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
414
415 for (i = dpm_table->count; i > 0; i--) {
416 mask = mask << 1;
417 if (dpm_table->dpm_level[i - 1].enabled)
418 mask |= 0x1;
419 else
420 mask &= 0xFFFFFFFE;
421 }
422
423 return mask;
424}
425
426uint8_t phm_get_voltage_index(
427 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
428{
429 uint8_t count = (uint8_t) (lookup_table->count);
430 uint8_t i;
431
432 PP_ASSERT_WITH_CODE((NULL != lookup_table),
433 "Lookup Table empty.", return 0);
434 PP_ASSERT_WITH_CODE((0 != count),
435 "Lookup Table empty.", return 0);
436
437 for (i = 0; i < lookup_table->count; i++) {
438 /* find first voltage equal or bigger than requested */
439 if (lookup_table->entries[i].us_vdd >= voltage)
440 return i;
441 }
442 /* voltage is bigger than max voltage in the table */
443 return i - 1;
444}
445
446uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
447{
448 uint32_t i;
449
450 for (i = 0; i < vddci_table->count; i++) {
451 if (vddci_table->entries[i].value >= vddci)
452 return vddci_table->entries[i].value;
453 }
454
455 PP_ASSERT_WITH_CODE(false,
456 "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
457 return vddci_table->entries[i].value);
458}
459
460int phm_find_boot_level(void *table,
461 uint32_t value, uint32_t *boot_level)
462{
463 int result = -EINVAL;
464 uint32_t i;
465 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
466
467 for (i = 0; i < dpm_table->count; i++) {
468 if (value == dpm_table->dpm_level[i].value) {
469 *boot_level = i;
470 result = 0;
471 }
472 }
473
474 return result;
475}
476
477int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
478 phm_ppt_v1_voltage_lookup_table *lookup_table,
479 uint16_t virtual_voltage_id, int32_t *sclk)
480{
481 uint8_t entryId;
482 uint8_t voltageId;
483 struct phm_ppt_v1_information *table_info =
484 (struct phm_ppt_v1_information *)(hwmgr->pptable);
485
486 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
487
488 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
489 for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
490 voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
491 if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
492 break;
493 }
494
495 PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
496 "Can't find requested voltage id in vdd_dep_on_sclk table!",
497 return -EINVAL;
498 );
499
500 *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
501
502 return 0;
503}
504
505/**
506 * Initialize Dynamic State Adjustment Rule Settings
507 *
508 * @param hwmgr the address of the powerplay hardware manager.
509 */
510int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
511{
512 uint32_t table_size;
513 struct phm_clock_voltage_dependency_table *table_clk_vlt;
514 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
515
516 /* initialize vddc_dep_on_dal_pwrl table */
517 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
518 table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
519
520 if (NULL == table_clk_vlt) {
521 printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
522 return -ENOMEM;
523 } else {
524 table_clk_vlt->count = 4;
525 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
526 table_clk_vlt->entries[0].v = 0;
527 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
528 table_clk_vlt->entries[1].v = 720;
529 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
530 table_clk_vlt->entries[2].v = 810;
531 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
532 table_clk_vlt->entries[3].v = 900;
533 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
534 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
535 }
536
537 return 0;
538}
539
540int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
541{
542 if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
543 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
544 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
545 }
546
547 if (NULL != hwmgr->backend) {
548 kfree(hwmgr->backend);
549 hwmgr->backend = NULL;
550 }
551
552 return 0;
553}
554
/**
 * Return the index of the lowest set bit in mask (the lowest enabled DPM
 * level).  hwmgr is unused and kept only for interface compatibility.
 *
 * The original looped forever when mask == 0; that case now returns 0.
 */
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	if (mask == 0)
		return 0;

	while (0 == (mask & (1U << level)))
		level++;

	return level;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
new file mode 100644
index 000000000000..c9e6c2d80ea6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef PP_HWMGR_PPT_H
25#define PP_HWMGR_PPT_H
26
27#include "hardwaremanager.h"
28#include "smumgr.h"
29#include "atom-types.h"
30
/* One clock/voltage operating point.  NOTE(review): the voltage fields
 * appear to be millivolts and vddInd an index into the voltage lookup
 * table — confirm against the pptable parser. */
struct phm_ppt_v1_clock_voltage_dependency_record {
	uint32_t clk;
	uint8_t vddInd;
	uint16_t vdd_offset;
	uint16_t vddc;
	uint16_t vddgfx;
	uint16_t vddci;
	uint16_t mvdd;
	uint8_t phases;
	uint8_t cks_enable;
	uint8_t cks_voffset;
};

typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;

/* Variable-length table: allocations size entries[] by count (the [1]
 * placeholder predates C99 flexible array members; sizeof math elsewhere
 * depends on it, so it is left as-is). */
struct phm_ppt_v1_clock_voltage_dependency_table {
	uint32_t count;                                            /* Number of entries. */
	phm_ppt_v1_clock_voltage_dependency_record entries[1];     /* Dynamically allocate count entries. */
};

typedef struct phm_ppt_v1_clock_voltage_dependency_table phm_ppt_v1_clock_voltage_dependency_table;


/* Multimedia Clock Voltage Dependency records and table */
struct phm_ppt_v1_mm_clock_voltage_dependency_record {
	uint32_t dclk;                                             /* UVD D-clock */
	uint32_t vclk;                                             /* UVD V-clock */
	uint32_t eclk;                                             /* VCE clock */
	uint32_t aclk;                                             /* ACP clock */
	uint32_t samclock;                                         /* SAMU clock */
	uint8_t vddcInd;
	uint16_t vddgfx_offset;
	uint16_t vddc;
	uint16_t vddgfx;
	uint8_t phases;
};
typedef struct phm_ppt_v1_mm_clock_voltage_dependency_record phm_ppt_v1_mm_clock_voltage_dependency_record;

struct phm_ppt_v1_mm_clock_voltage_dependency_table {
	uint32_t count;                                            /* Number of entries. */
	phm_ppt_v1_mm_clock_voltage_dependency_record entries[1];  /* Dynamically allocate count entries. */
};
typedef struct phm_ppt_v1_mm_clock_voltage_dependency_table phm_ppt_v1_mm_clock_voltage_dependency_table;

/* One row of the voltage lookup table (us_vdd is the key used by
 * phm_get_voltage_index()). */
struct phm_ppt_v1_voltage_lookup_record {
	uint16_t us_calculated;
	uint16_t us_vdd;                                           /* Base voltage */
	uint16_t us_cac_low;
	uint16_t us_cac_mid;
	uint16_t us_cac_high;
};
typedef struct phm_ppt_v1_voltage_lookup_record phm_ppt_v1_voltage_lookup_record;

struct phm_ppt_v1_voltage_lookup_table {
	uint32_t count;
	phm_ppt_v1_voltage_lookup_record entries[1];               /* Dynamically allocate count entries. */
};
typedef struct phm_ppt_v1_voltage_lookup_table phm_ppt_v1_voltage_lookup_table;

/* PCIE records and Table */

struct phm_ppt_v1_pcie_record {
	uint8_t gen_speed;
	uint8_t lane_width;
};
typedef struct phm_ppt_v1_pcie_record phm_ppt_v1_pcie_record;

struct phm_ppt_v1_pcie_table {
	uint32_t count;                                            /* Number of entries. */
	phm_ppt_v1_pcie_record entries[1];                         /* Dynamically allocate count entries. */
};
typedef struct phm_ppt_v1_pcie_table phm_ppt_v1_pcie_table;
103
104#endif
105
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
new file mode 100644
index 000000000000..7b2d5000292d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -0,0 +1,76 @@
1#include <linux/errno.h>
2#include "linux/delay.h"
3#include "hwmgr.h"
4#include "amd_acpi.h"
5
6bool acpi_atcs_functions_supported(void *device, uint32_t index)
7{
8 int32_t result;
9 struct atcs_verify_interface output_buf = {0};
10
11 int32_t temp_buffer = 1;
12
13 result = cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
14 ATCS_FUNCTION_VERIFY_INTERFACE,
15 &temp_buffer,
16 &output_buf,
17 1,
18 sizeof(temp_buffer),
19 sizeof(output_buf));
20
21 return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
22}
23
24int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
25{
26 struct atcs_pref_req_input atcs_input;
27 struct atcs_pref_req_output atcs_output;
28 u32 retry = 3;
29 int result;
30 struct cgs_system_info info = {0};
31
32 if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
33 return -EINVAL;
34
35 info.size = sizeof(struct cgs_system_info);
36 info.info_id = CGS_SYSTEM_INFO_ADAPTER_BDF_ID;
37 result = cgs_query_system_info(device, &info);
38 if (result != 0)
39 return -EINVAL;
40 atcs_input.client_id = (uint16_t)info.value;
41 atcs_input.size = sizeof(struct atcs_pref_req_input);
42 atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
43 atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
44 if (advertise)
45 atcs_input.flags |= ATCS_ADVERTISE_CAPS;
46 atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
47 atcs_input.perf_req = perf_req;
48
49 atcs_output.size = sizeof(struct atcs_pref_req_input);
50
51 while (retry--) {
52 result = cgs_call_acpi_method(device,
53 CGS_ACPI_METHOD_ATCS,
54 ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
55 &atcs_input,
56 &atcs_output,
57 0,
58 sizeof(atcs_input),
59 sizeof(atcs_output));
60 if (result != 0)
61 return -EIO;
62
63 switch (atcs_output.ret_val) {
64 case ATCS_REQUEST_REFUSED:
65 default:
66 return -EINVAL;
67 case ATCS_REQUEST_COMPLETE:
68 return 0;
69 case ATCS_REQUEST_IN_PROGRESS:
70 udelay(10);
71 break;
72 }
73 }
74
75 return 0;
76}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
new file mode 100644
index 000000000000..2a83a4af2904
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -0,0 +1,1207 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26
27#include "ppatomctrl.h"
28#include "atombios.h"
29#include "cgs_common.h"
30#include "pp_debug.h"
31#include "ppevvmath.h"
32
33#define MEM_ID_MASK 0xff000000
34#define MEM_ID_SHIFT 24
35#define CLOCK_RANGE_MASK 0x00ffffff
36#define CLOCK_RANGE_SHIFT 0
37#define LOW_NIBBLE_MASK 0xf
38#define DATA_EQU_PREV 0
39#define DATA_FROM_TABLE 4
40
/* Overlay of the three VBIOS VoltageObjectInfo table layouts; the revision
 * actually present is determined by the table header at runtime. */
union voltage_object_info {
	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};
46
/*
 * Extract the AC timing register values for one memory module from the
 * VBIOS memory-setting data blocks and fill table->mc_reg_table_entry[].
 *
 * @param index      memory module id to match against each data block
 * @param reg_block  the VBIOS ATOM_INIT_REG_BLOCK
 * @param table      MC register table; table->last and
 *                   table->mc_reg_address[] must already be populated
 *                   (see atomctrl_set_mc_reg_address_table)
 * @return 0 on success, -1 if the data block list is not properly
 *         terminated with END_OF_REG_DATA_BLOCK
 */
static int atomctrl_retrieve_ac_timing(
		uint8_t index,
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint32_t i, j;
	uint8_t tmem_id;
	/* Data blocks follow the register index table: skip the two u16
	 * size fields plus the index table itself. */
	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));

	uint8_t num_ranges = 0;

	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
		/* First dword of each block: memory id in the high byte,
		 * applicable clock range in the low 24 bits. */
		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);

		if (index == tmem_id) {
			table->mc_reg_table_entry[num_ranges].mclk_max =
				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
						CLOCK_RANGE_SHIFT);

			for (i = 0, j = 1; i < table->last; i++) {
				if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
					/* Value comes from the data block
					 * (j tracks the next dword to consume). */
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						(uint32_t)*((uint32_t *)reg_data + j);
					j++;
				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
					/* Value repeats the previous register's data. */
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						table->mc_reg_table_entry[num_ranges].mc_data[i-1];
				}
			}
			num_ranges++;
		}

		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)) ;
	}

	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
			"Invalid VramInfo table.", return -1);
	table->num_entries = num_ranges;

	return 0;
}
93
/**
 * Get memory clock AC timing registers index from VBIOS table.
 * The VBIOS marks the end of the register index list with an entry whose
 * ucPreRegDataLength has bit 6 (ACCESS_PLACEHOLDER) set.
 * @param reg_block the address of ATOM_INIT_REG_BLOCK
 * @param table the address of MCRegTable; on return table->last holds the
 *              number of register addresses copied
 * @return 0 on success, -1 when the VBIOS index table has too many entries
 */
static int atomctrl_set_mc_reg_address_table(
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint8_t i = 0;
	/* The index table is an array of ATOM_INIT_REG_INDEX_FORMAT records. */
	uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
			/ sizeof(ATOM_INIT_REG_INDEX_FORMAT));
	ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];

	num_entries--;        /* subtract 1 data end mark entry */

	PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -1);

	/* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */
	while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) &&
			(i < num_entries)) {
		table->mc_reg_address[i].s1 =
			(uint16_t)(le16_to_cpu(format->usRegIndex));
		table->mc_reg_address[i].uc_pre_reg_data =
			format->ucPreRegDataLength;

		i++;
		format = (ATOM_INIT_REG_INDEX_FORMAT *)
			((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
	}

	table->last = i;
	return 0;
}
131
132
133int atomctrl_initialize_mc_reg_table(
134 struct pp_hwmgr *hwmgr,
135 uint8_t module_index,
136 pp_atomctrl_mc_reg_table *table)
137{
138 ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
139 ATOM_INIT_REG_BLOCK *reg_block;
140 int result = 0;
141 u8 frev, crev;
142 u16 size;
143
144 vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
145 cgs_atom_get_data_table(hwmgr->device,
146 GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
147
148 if (module_index >= vram_info->ucNumOfVRAMModule) {
149 printk(KERN_ERR "[ powerplay ] Invalid VramInfo table.");
150 result = -1;
151 } else if (vram_info->sHeader.ucTableFormatRevision < 2) {
152 printk(KERN_ERR "[ powerplay ] Invalid VramInfo table.");
153 result = -1;
154 }
155
156 if (0 == result) {
157 reg_block = (ATOM_INIT_REG_BLOCK *)
158 ((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
159 result = atomctrl_set_mc_reg_address_table(reg_block, table);
160 }
161
162 if (0 == result) {
163 result = atomctrl_retrieve_ac_timing(module_index,
164 reg_block, table);
165 }
166
167 return result;
168}
169
170/**
171 * Set DRAM timings based on engine clock and memory clock.
172 */
173int atomctrl_set_engine_dram_timings_rv770(
174 struct pp_hwmgr *hwmgr,
175 uint32_t engine_clock,
176 uint32_t memory_clock)
177{
178 SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
179
180 /* They are both in 10KHz Units. */
181 engine_clock_parameters.ulTargetEngineClock =
182 (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK;
183 engine_clock_parameters.ulTargetEngineClock |=
184 (COMPUTE_ENGINE_PLL_PARAM << 24);
185
186 /* in 10 khz units.*/
187 engine_clock_parameters.sReserved.ulClock =
188 (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK;
189 return cgs_atom_exec_cmd_table(hwmgr->device,
190 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
191 &engine_clock_parameters);
192}
193
194/**
195 * Private Function to get the PowerPlay Table Address.
196 * WARNING: The tabled returned by this function is in
197 * dynamically allocated memory.
198 * The caller has to release if by calling kfree.
199 */
200static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
201{
202 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
203 u8 frev, crev;
204 u16 size;
205 union voltage_object_info *voltage_info;
206
207 voltage_info = (union voltage_object_info *)
208 cgs_atom_get_data_table(device, index,
209 &size, &frev, &crev);
210
211 if (voltage_info != NULL)
212 return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
213 else
214 return NULL;
215}
216
217static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
218 const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
219 uint8_t voltage_type, uint8_t voltage_mode)
220{
221 unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
222 unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
223 uint8_t *start = (uint8_t *)voltage_object_info_table;
224
225 while (offset < size) {
226 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
227 (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
228
229 if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
230 voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
231 return voltage_object;
232
233 offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
234 }
235
236 return NULL;
237}
238
/** atomctrl_get_memory_pll_dividers_si().
 *
 * Compute the memory PLL dividers for a target memory clock through the
 * ComputeMemoryClockParam VBIOS command table (SI parameter layout v2.1)
 * and unpack the result into *mpll_param.
 *
 * @param hwmgr       input parameter: pointer to HwMgr
 * @param clock_value input parameter: memory clock
 * @param mpll_param  output parameter: memory PLL dividers
 * @param strobe_mode input parameter: true for strobe mode, false for performance mode
 * @return 0 on success, otherwise the non-zero command-table result
 *         (mpll_param is untouched on failure)
 */
int atomctrl_get_memory_pll_dividers_si(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_memory_clock_param *mpll_param,
		bool strobe_mode)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
	int result;

	mpll_parameters.ulClock = (uint32_t) clock_value;
	mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);

	result = cgs_atom_exec_cmd_table
		(hwmgr->device,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
		 &mpll_parameters);

	if (0 == result) {
		/* Unpack the command-table output; the flag bits in
		 * ucPllCntlFlag are exploded into individual fields. */
		mpll_param->mpll_fb_divider.clk_frac =
			mpll_parameters.ulFbDiv.usFbDivFrac;
		mpll_param->mpll_fb_divider.cl_kf =
			mpll_parameters.ulFbDiv.usFbDiv;
		mpll_param->mpll_post_divider =
			(uint32_t)mpll_parameters.ucPostDiv;
		mpll_param->vco_mode =
			(uint32_t)(mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_VCO_MODE_MASK);
		mpll_param->yclk_sel =
			(uint32_t)((mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0);
		mpll_param->qdr =
			(uint32_t)((mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0);
		mpll_param->half_rate =
			(uint32_t)((mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0);
		mpll_param->dll_speed =
			(uint32_t)(mpll_parameters.ucDllSpeed);
		mpll_param->bw_ctrl =
			(uint32_t)(mpll_parameters.ucBWCntl);
	}

	return result;
}
290
291/** atomctrl_get_memory_pll_dividers_vi().
292 *
293 * @param hwmgr input parameter: pointer to HwMgr
294 * @param clock_value input parameter: memory clock
295 * @param dividers output parameter: memory PLL dividers
296 */
297int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
298 uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
299{
300 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
301 int result;
302
303 mpll_parameters.ulClock.ulClock = (uint32_t)clock_value;
304
305 result = cgs_atom_exec_cmd_table(hwmgr->device,
306 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
307 &mpll_parameters);
308
309 if (!result)
310 mpll_param->mpll_post_divider =
311 (uint32_t)mpll_parameters.ulClock.ucPostDiv;
312
313 return result;
314}
315
316int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
317 uint32_t clock_value,
318 pp_atomctrl_clock_dividers_kong *dividers)
319{
320 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
321 int result;
322
323 pll_parameters.ulClock = clock_value;
324
325 result = cgs_atom_exec_cmd_table
326 (hwmgr->device,
327 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
328 &pll_parameters);
329
330 if (0 == result) {
331 dividers->pll_post_divider = pll_parameters.ucPostDiv;
332 dividers->real_clock = pll_parameters.ulClock;
333 }
334
335 return result;
336}
337
338int atomctrl_get_engine_pll_dividers_vi(
339 struct pp_hwmgr *hwmgr,
340 uint32_t clock_value,
341 pp_atomctrl_clock_dividers_vi *dividers)
342{
343 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
344 int result;
345
346 pll_patameters.ulClock.ulClock = clock_value;
347 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
348
349 result = cgs_atom_exec_cmd_table
350 (hwmgr->device,
351 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
352 &pll_patameters);
353
354 if (0 == result) {
355 dividers->pll_post_divider =
356 pll_patameters.ulClock.ucPostDiv;
357 dividers->real_clock =
358 pll_patameters.ulClock.ulClock;
359
360 dividers->ul_fb_div.ul_fb_div_frac =
361 pll_patameters.ulFbDiv.usFbDivFrac;
362 dividers->ul_fb_div.ul_fb_div =
363 pll_patameters.ulFbDiv.usFbDiv;
364
365 dividers->uc_pll_ref_div =
366 pll_patameters.ucPllRefDiv;
367 dividers->uc_pll_post_div =
368 pll_patameters.ucPllPostDiv;
369 dividers->uc_pll_cntl_flag =
370 pll_patameters.ucPllCntlFlag;
371 }
372
373 return result;
374}
375
376int atomctrl_get_dfs_pll_dividers_vi(
377 struct pp_hwmgr *hwmgr,
378 uint32_t clock_value,
379 pp_atomctrl_clock_dividers_vi *dividers)
380{
381 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
382 int result;
383
384 pll_patameters.ulClock.ulClock = clock_value;
385 pll_patameters.ulClock.ucPostDiv =
386 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
387
388 result = cgs_atom_exec_cmd_table
389 (hwmgr->device,
390 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
391 &pll_patameters);
392
393 if (0 == result) {
394 dividers->pll_post_divider =
395 pll_patameters.ulClock.ucPostDiv;
396 dividers->real_clock =
397 pll_patameters.ulClock.ulClock;
398
399 dividers->ul_fb_div.ul_fb_div_frac =
400 pll_patameters.ulFbDiv.usFbDivFrac;
401 dividers->ul_fb_div.ul_fb_div =
402 pll_patameters.ulFbDiv.usFbDiv;
403
404 dividers->uc_pll_ref_div =
405 pll_patameters.ucPllRefDiv;
406 dividers->uc_pll_post_div =
407 pll_patameters.ucPllPostDiv;
408 dividers->uc_pll_cntl_flag =
409 pll_patameters.ucPllCntlFlag;
410 }
411
412 return result;
413}
414
415/**
416 * Get the reference clock in 10KHz
417 */
418uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
419{
420 ATOM_FIRMWARE_INFO *fw_info;
421 u8 frev, crev;
422 u16 size;
423 uint32_t clock;
424
425 fw_info = (ATOM_FIRMWARE_INFO *)
426 cgs_atom_get_data_table(hwmgr->device,
427 GetIndexIntoMasterTable(DATA, FirmwareInfo),
428 &size, &frev, &crev);
429
430 if (fw_info == NULL)
431 clock = 2700;
432 else
433 clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
434
435 return clock;
436}
437
438/**
439 * Returns true if the given voltage type is controlled by GPIO pins.
440 * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
441 * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
442 * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
443 */
444bool atomctrl_is_voltage_controled_by_gpio_v3(
445 struct pp_hwmgr *hwmgr,
446 uint8_t voltage_type,
447 uint8_t voltage_mode)
448{
449 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
450 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
451 bool ret;
452
453 PP_ASSERT_WITH_CODE((NULL != voltage_info),
454 "Could not find Voltage Table in BIOS.", return false;);
455
456 ret = (NULL != atomctrl_lookup_voltage_type_v3
457 (voltage_info, voltage_type, voltage_mode)) ? true : false;
458
459 return ret;
460}
461
/*
 * Read the GPIO-controlled voltage table for the given voltage type/mode
 * out of the VBIOS v3.1 voltage object info table.
 *
 * @param hwmgr         pointer to HwMgr (supplies the CGS device)
 * @param voltage_type  e.g. SET_VOLTAGE_TYPE_ASIC_VDDC
 * @param voltage_mode  e.g. ATOM_SET_VOLTAGE
 * @param voltage_table output: value/smio pairs, mask, count, phase delay
 * @return 0 on success, -1 when the table or matching object is missing
 *         or the object has more entries than the caller's array holds
 */
int atomctrl_get_voltage_table_v3(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint8_t voltage_mode,
		pp_atomctrl_voltage_table *voltage_table)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
	unsigned int i;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -1;);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, voltage_mode);

	if (voltage_object == NULL)
		return -1;

	/* Guard against overrunning voltage_table->entries[]. */
	PP_ASSERT_WITH_CODE(
			(voltage_object->asGpioVoltageObj.ucGpioEntryNum <=
			PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
			"Too many voltage entries!",
			return -1;
			);

	for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
		/* NOTE(review): usVoltageValue/ulVoltageId are copied without
		 * le16/le32 conversion, unlike other VBIOS reads in this
		 * file -- confirm behavior on big-endian hosts. */
		voltage_table->entries[i].value =
			voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue;
		voltage_table->entries[i].smio_low =
			voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId;
	}

	voltage_table->mask_low =
		voltage_object->asGpioVoltageObj.ulGpioMaskVal;
	voltage_table->count =
		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
	voltage_table->phase_delay =
		voltage_object->asGpioVoltageObj.ucPhaseDelay;

	return 0;
}
505
506static bool atomctrl_lookup_gpio_pin(
507 ATOM_GPIO_PIN_LUT * gpio_lookup_table,
508 const uint32_t pinId,
509 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
510{
511 unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
512 unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
513 uint8_t *start = (uint8_t *)gpio_lookup_table;
514
515 while (offset < size) {
516 const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
517 (const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);
518
519 if (pinId == pin_assignment->ucGPIO_ID) {
520 gpio_pin_assignment->uc_gpio_pin_bit_shift =
521 pin_assignment->ucGpioPinBitShift;
522 gpio_pin_assignment->us_gpio_pin_aindex =
523 le16_to_cpu(pin_assignment->usGpioPin_AIndex);
524 return false;
525 }
526
527 offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
528 }
529
530 return true;
531}
532
533/**
534 * Private Function to get the PowerPlay Table Address.
535 * WARNING: The tabled returned by this function is in
536 * dynamically allocated memory.
537 * The caller has to release if by calling kfree.
538 */
539static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
540{
541 u8 frev, crev;
542 u16 size;
543 void *table_address;
544
545 table_address = (ATOM_GPIO_PIN_LUT *)
546 cgs_atom_get_data_table(device,
547 GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
548 &size, &frev, &crev);
549
550 PP_ASSERT_WITH_CODE((NULL != table_address),
551 "Error retrieving BIOS Table Address!", return NULL;);
552
553 return (ATOM_GPIO_PIN_LUT *)table_address;
554}
555
556/**
557 * Returns 1 if the given pin id find in lookup table.
558 */
559bool atomctrl_get_pp_assign_pin(
560 struct pp_hwmgr *hwmgr,
561 const uint32_t pinId,
562 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
563{
564 bool bRet = 0;
565 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
566 get_gpio_lookup_table(hwmgr->device);
567
568 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
569 "Could not find GPIO lookup Table in BIOS.", return -1);
570
571 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
572 gpio_pin_assignment);
573
574 return bRet;
575}
576
577int atomctrl_calculate_voltage_evv_on_sclk(
578 struct pp_hwmgr *hwmgr,
579 uint8_t voltage_type,
580 uint32_t sclk,
581 uint16_t virtual_voltage_Id,
582 uint16_t *voltage,
583 uint16_t dpm_level,
584 bool debug)
585{
586 ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
587
588 EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
589 EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
590 EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
591 EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
592 EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
593 EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
594 EFUSE_INPUT_PARAMETER sInput_FuseValues;
595 READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
596
597 uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
598 fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
599 fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
600 fInt fLkg_FT, repeat;
601 fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
602 fInt fRLL_LoadLine, fPowerDPMx, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
603 fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
604 fInt fSclk_margin, fSclk, fEVV_V;
605 fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
606 uint32_t ul_FT_Lkg_V0NORM;
607 fInt fLn_MaxDivMin, fMin, fAverage, fRange;
608 fInt fRoots[2];
609 fInt fStepSize = GetScaledFraction(625, 100000);
610
611 int result;
612
613 getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
614 cgs_atom_get_data_table(hwmgr->device,
615 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
616 NULL, NULL, NULL);
617
618 if (!getASICProfilingInfo)
619 return -1;
620
621 if(getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
622 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
623 getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
624 return -1;
625
626 /*-----------------------------------------------------------
627 *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
628 *-----------------------------------------------------------
629 */
630 fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
631
632 switch (dpm_level) {
633 case 1:
634 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1);
635 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000);
636 break;
637 case 2:
638 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2);
639 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000);
640 break;
641 case 3:
642 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3);
643 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000);
644 break;
645 case 4:
646 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4);
647 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000);
648 break;
649 case 5:
650 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5);
651 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000);
652 break;
653 case 6:
654 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6);
655 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000);
656 break;
657 case 7:
658 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7);
659 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000);
660 break;
661 default:
662 printk(KERN_ERR "DPM Level not supported\n");
663 fPowerDPMx = Convert_ULONG_ToFraction(1);
664 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000);
665 }
666
667 /*-------------------------
668 * DECODING FUSE VALUES
669 * ------------------------
670 */
671 /*Decode RO_Fused*/
672 sRO_fuse = getASICProfilingInfo->sRoFuse;
673
674 sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
675 sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
676 sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
677
678 sOutput_FuseValues.sEfuse = sInput_FuseValues;
679
680 result = cgs_atom_exec_cmd_table(hwmgr->device,
681 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
682 &sOutput_FuseValues);
683
684 if (result)
685 return result;
686
687 /* Finally, the actual fuse value */
688 ul_RO_fused = sOutput_FuseValues.ulEfuseValue;
689 fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1);
690 fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1);
691 fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
692
693 sCACm_fuse = getASICProfilingInfo->sCACm;
694
695 sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
696 sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
697 sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
698
699 sOutput_FuseValues.sEfuse = sInput_FuseValues;
700
701 result = cgs_atom_exec_cmd_table(hwmgr->device,
702 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
703 &sOutput_FuseValues);
704
705 if (result)
706 return result;
707
708 ul_CACm_fused = sOutput_FuseValues.ulEfuseValue;
709 fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000);
710 fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000);
711
712 fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
713
714 sCACb_fuse = getASICProfilingInfo->sCACb;
715
716 sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
717 sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
718 sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
719 sOutput_FuseValues.sEfuse = sInput_FuseValues;
720
721 result = cgs_atom_exec_cmd_table(hwmgr->device,
722 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
723 &sOutput_FuseValues);
724
725 if (result)
726 return result;
727
728 ul_CACb_fused = sOutput_FuseValues.ulEfuseValue;
729 fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000);
730 fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000);
731
732 fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
733
734 sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
735
736 sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
737 sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
738 sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
739
740 sOutput_FuseValues.sEfuse = sInput_FuseValues;
741
742 result = cgs_atom_exec_cmd_table(hwmgr->device,
743 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
744 &sOutput_FuseValues);
745
746 if (result)
747 return result;
748
749 ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue;
750 fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000);
751 fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000);
752
753 fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
754 fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
755
756 sKv_m_fuse = getASICProfilingInfo->sKv_m;
757
758 sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
759 sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
760 sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
761
762 sOutput_FuseValues.sEfuse = sInput_FuseValues;
763
764 result = cgs_atom_exec_cmd_table(hwmgr->device,
765 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
766 &sOutput_FuseValues);
767 if (result)
768 return result;
769
770 ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue;
771 fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000);
772 fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000);
773 fRange = fMultiply(fRange, ConvertToFraction(-1));
774
775 fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
776 fAverage, fRange, sKv_m_fuse.ucEfuseLength);
777
778 sKv_b_fuse = getASICProfilingInfo->sKv_b;
779
780 sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
781 sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
782 sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
783 sOutput_FuseValues.sEfuse = sInput_FuseValues;
784
785 result = cgs_atom_exec_cmd_table(hwmgr->device,
786 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
787 &sOutput_FuseValues);
788
789 if (result)
790 return result;
791
792 ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue;
793 fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000);
794 fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000);
795
796 fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
797 fAverage, fRange, sKv_b_fuse.ucEfuseLength);
798
799 /* Decoding the Leakage - No special struct container */
800 /*
801 * usLkgEuseIndex=56
802 * ucLkgEfuseBitLSB=6
803 * ucLkgEfuseLength=10
804 * ulLkgEncodeLn_MaxDivMin=69077
805 * ulLkgEncodeMax=1000000
806 * ulLkgEncodeMin=1000
807 * ulEfuseLogisticAlpha=13
808 */
809
810 sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
811 sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
812 sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
813
814 sOutput_FuseValues.sEfuse = sInput_FuseValues;
815
816 result = cgs_atom_exec_cmd_table(hwmgr->device,
817 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
818 &sOutput_FuseValues);
819
820 if (result)
821 return result;
822
823 ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue;
824 fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000);
825 fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000);
826
827 fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
828 fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
829 fLkg_FT = fFT_Lkg_V0NORM;
830
831 /*-------------------------------------------
832 * PART 2 - Grabbing all required values
833 *-------------------------------------------
834 */
835 fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000),
836 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
837 fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000),
838 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
839 fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000),
840 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
841 fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000),
842 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
843 fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000),
844 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
845 fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000),
846 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
847 fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000),
848 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
849 fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000),
850 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
851
852 fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a);
853 fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b);
854 fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c);
855
856 fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed);
857
858 fMargin_FMAX_mean = GetScaledFraction(
859 getASICProfilingInfo->ulMargin_Fmax_mean, 10000);
860 fMargin_Plat_mean = GetScaledFraction(
861 getASICProfilingInfo->ulMargin_plat_mean, 10000);
862 fMargin_FMAX_sigma = GetScaledFraction(
863 getASICProfilingInfo->ulMargin_Fmax_sigma, 10000);
864 fMargin_Plat_sigma = GetScaledFraction(
865 getASICProfilingInfo->ulMargin_plat_sigma, 10000);
866
867 fMargin_DC_sigma = GetScaledFraction(
868 getASICProfilingInfo->ulMargin_DC_sigma, 100);
869 fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
870
871 fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
872 fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
873 fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
874 fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
875 fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
876
877 fSclk = GetScaledFraction(sclk, 100);
878
879 fV_max = fDivide(GetScaledFraction(
880 getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4));
881 fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10);
882 fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100);
883 fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10);
884 fV_FT = fDivide(GetScaledFraction(
885 getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4));
886 fV_min = fDivide(GetScaledFraction(
887 getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4));
888
889 /*-----------------------
890 * PART 3
891 *-----------------------
892 */
893
894 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4,fSclk), fSM_A5));
895 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
896 fC_Term = fAdd(fMargin_RO_c,
897 fAdd(fMultiply(fSM_A0,fLkg_FT),
898 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT,fSclk)),
899 fAdd(fMultiply(fSM_A3, fSclk),
900 fSubtract(fSM_A7,fRO_fused)))));
901
902 fVDDC_base = fSubtract(fRO_fused,
903 fSubtract(fMargin_RO_c,
904 fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
905 fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0,fSclk), fSM_A2));
906
907 repeat = fSubtract(fVDDC_base,
908 fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
909
910 fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
911 fGetSquare(repeat)),
912 fAdd(fMultiply(fMargin_RO_b, repeat),
913 fMargin_RO_c));
914
915 fDC_SCLK = fSubtract(fRO_fused,
916 fSubtract(fRO_DC_margin,
917 fSubtract(fSM_A3,
918 fMultiply(fSM_A2, repeat))));
919 fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0,repeat), fSM_A1));
920
921 fSigma_DC = fSubtract(fSclk, fDC_SCLK);
922
923 fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
924 fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
925 fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
926 fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
927
928 fSquared_Sigma_DC = fGetSquare(fSigma_DC);
929 fSquared_Sigma_CR = fGetSquare(fSigma_CR);
930 fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
931
932 fSclk_margin = fAdd(fMicro_FMAX,
933 fAdd(fMicro_CR,
934 fAdd(fMargin_fixed,
935 fSqrt(fAdd(fSquared_Sigma_FMAX,
936 fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
937 /*
938 fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
939 fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
940 fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
941 */
942
943 fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
944 fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
945 fC_Term = fAdd(fRO_DC_margin,
946 fAdd(fMultiply(fSM_A0, fLkg_FT),
947 fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
948 fAdd(fSclk, fSclk_margin)),
949 fAdd(fMultiply(fSM_A3,
950 fAdd(fSclk, fSclk_margin)),
951 fSubtract(fSM_A7, fRO_fused)))));
952
953 SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
954
955 if (GreaterThan(fRoots[0], fRoots[1]))
956 fEVV_V = fRoots[1];
957 else
958 fEVV_V = fRoots[0];
959
960 if (GreaterThan(fV_min, fEVV_V))
961 fEVV_V = fV_min;
962 else if (GreaterThan(fEVV_V, fV_max))
963 fEVV_V = fSubtract(fV_max, fStepSize);
964
965 fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
966
967 /*-----------------
968 * PART 4
969 *-----------------
970 */
971
972 fV_x = fV_min;
973
974 while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
975 fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
976 fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
977 fGetSquare(fV_x)), fDerateTDP);
978
979 fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
980 fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
981 fT_prod), fKv_b_fused), fV_x)), fV_x)));
982 fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
983 fKt_Beta_fused, fT_prod)));
984 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
985 fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
986 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
987 fKt_Beta_fused, fT_FT)));
988
989 fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
990
991 fTDP_Current = fDivide(fTDP_Power, fV_x);
992
993 fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
994 ConvertToFraction(10)));
995
996 fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
997
998 if (GreaterThan(fV_max, fV_NL) &&
999 (GreaterThan(fV_NL,fEVV_V) ||
1000 Equal(fV_NL, fEVV_V))) {
1001 fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
1002
1003 *voltage = (uint16_t)fV_NL.partial.real;
1004 break;
1005 } else
1006 fV_x = fAdd(fV_x, fStepSize);
1007 }
1008
1009 return result;
1010}
1011
1012/** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table.
1013 * @param hwmgr input: pointer to hwManager
1014 * @param voltage_type input: type of EVV voltage VDDC or VDDGFX
 * @param sclk  input: in 10KHz units. DPM state SCLK frequency,
 *     which is defined in the PPTable SCLK/VDDC dependence
 *     table associated with this virtual_voltage_Id
1018 * @param virtual_voltage_Id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1019 * @param voltage output: real voltage level in unit of mv
1020 */
1021int atomctrl_get_voltage_evv_on_sclk(
1022 struct pp_hwmgr *hwmgr,
1023 uint8_t voltage_type,
1024 uint32_t sclk, uint16_t virtual_voltage_Id,
1025 uint16_t *voltage)
1026{
1027 int result;
1028 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1029
1030 get_voltage_info_param_space.ucVoltageType =
1031 voltage_type;
1032 get_voltage_info_param_space.ucVoltageMode =
1033 ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1034 get_voltage_info_param_space.usVoltageLevel =
1035 virtual_voltage_Id;
1036 get_voltage_info_param_space.ulSCLKFreq =
1037 sclk;
1038
1039 result = cgs_atom_exec_cmd_table(hwmgr->device,
1040 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1041 &get_voltage_info_param_space);
1042
1043 if (0 != result)
1044 return result;
1045
1046 *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
1047 (&get_voltage_info_param_space))->usVoltageLevel;
1048
1049 return result;
1050}
1051
1052/**
1053 * Get the mpll reference clock in 10KHz
1054 */
1055uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
1056{
1057 ATOM_COMMON_TABLE_HEADER *fw_info;
1058 uint32_t clock;
1059 u8 frev, crev;
1060 u16 size;
1061
1062 fw_info = (ATOM_COMMON_TABLE_HEADER *)
1063 cgs_atom_get_data_table(hwmgr->device,
1064 GetIndexIntoMasterTable(DATA, FirmwareInfo),
1065 &size, &frev, &crev);
1066
1067 if (fw_info == NULL)
1068 clock = 2700;
1069 else {
1070 if ((fw_info->ucTableFormatRevision == 2) &&
1071 (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
1072 ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
1073 (ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
1074 clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
1075 } else {
1076 ATOM_FIRMWARE_INFO *fwInfo_0_0 =
1077 (ATOM_FIRMWARE_INFO *)fw_info;
1078 clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
1079 }
1080 }
1081
1082 return clock;
1083}
1084
1085/**
1086 * Get the asic internal spread spectrum table
1087 */
1088static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
1089{
1090 ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
1091 u8 frev, crev;
1092 u16 size;
1093
1094 table = (ATOM_ASIC_INTERNAL_SS_INFO *)
1095 cgs_atom_get_data_table(device,
1096 GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
1097 &size, &frev, &crev);
1098
1099 return table;
1100}
1101
1102/**
1103 * Get the asic internal spread spectrum assignment
1104 */
1105static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
1106 const uint8_t clockSource,
1107 const uint32_t clockSpeed,
1108 pp_atomctrl_internal_ss_info *ssEntry)
1109{
1110 ATOM_ASIC_INTERNAL_SS_INFO *table;
1111 ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
1112 int entry_found = 0;
1113
1114 memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));
1115
1116 table = asic_internal_ss_get_ss_table(hwmgr->device);
1117
1118 if (NULL == table)
1119 return -1;
1120
1121 ssInfo = &table->asSpreadSpectrum[0];
1122
1123 while (((uint8_t *)ssInfo - (uint8_t *)table) <
1124 le16_to_cpu(table->sHeader.usStructureSize)) {
1125 if ((clockSource == ssInfo->ucClockIndication) &&
1126 ((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
1127 entry_found = 1;
1128 break;
1129 }
1130
1131 ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
1132 sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1133 }
1134
1135 if (entry_found) {
1136 ssEntry->speed_spectrum_percentage =
1137 ssInfo->usSpreadSpectrumPercentage;
1138 ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz;
1139
1140 if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
1141 (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
1142 (GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
1143 ssEntry->speed_spectrum_rate /= 100;
1144 }
1145
1146 switch (ssInfo->ucSpreadSpectrumMode) {
1147 case 0:
1148 ssEntry->speed_spectrum_mode =
1149 pp_atomctrl_spread_spectrum_mode_down;
1150 break;
1151 case 1:
1152 ssEntry->speed_spectrum_mode =
1153 pp_atomctrl_spread_spectrum_mode_center;
1154 break;
1155 default:
1156 ssEntry->speed_spectrum_mode =
1157 pp_atomctrl_spread_spectrum_mode_down;
1158 break;
1159 }
1160 }
1161
1162 return entry_found ? 0 : 1;
1163}
1164
/**
 * Get the memory clock spread spectrum info.
 *
 * Thin wrapper around asic_internal_ss_get_ss_asignment() selecting the
 * memory (MCLK) spread spectrum table entries.
 * Returns 0 and fills @ssInfo when a matching entry is found,
 * non-zero otherwise.
 */
int atomctrl_get_memory_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t memory_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	return asic_internal_ss_get_ss_asignment(hwmgr,
			ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo);
}
/**
 * Get the engine clock spread spectrum info.
 *
 * Thin wrapper around asic_internal_ss_get_ss_asignment() selecting the
 * engine (SCLK) spread spectrum table entries.
 * Returns 0 and fills @ssInfo when a matching entry is found,
 * non-zero otherwise.
 */
int atomctrl_get_engine_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t engine_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	return asic_internal_ss_get_ss_asignment(hwmgr,
			ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
}
1187
1188int atomctrl_read_efuse(void *device, uint16_t start_index,
1189 uint16_t end_index, uint32_t mask, uint32_t *efuse)
1190{
1191 int result;
1192 READ_EFUSE_VALUE_PARAMETER efuse_param;
1193
1194 efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4;
1195 efuse_param.sEfuse.ucBitShift = (uint8_t)
1196 (start_index - ((start_index / 32) * 32));
1197 efuse_param.sEfuse.ucBitLength = (uint8_t)
1198 ((end_index - start_index) + 1);
1199
1200 result = cgs_atom_exec_cmd_table(device,
1201 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
1202 &efuse_param);
1203 if (!result)
1204 *efuse = efuse_param.ulEfuseValue & mask;
1205
1206 return result;
1207}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
new file mode 100644
index 000000000000..627420b80a5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef PP_ATOMVOLTAGECTRL_H
25#define PP_ATOMVOLTAGECTRL_H
26
27#include "hwmgr.h"
28
29#define MEM_TYPE_GDDR5 0x50
30#define MEM_TYPE_GDDR4 0x40
31#define MEM_TYPE_GDDR3 0x30
32#define MEM_TYPE_DDR2 0x20
33#define MEM_TYPE_GDDR1 0x10
34#define MEM_TYPE_DDR3 0xb0
35#define MEM_TYPE_MASK 0xF0
36
37
38/* As returned from PowerConnectorDetectionTable. */
39#define PP_ATOM_POWER_BUDGET_DISABLE_OVERDRIVE 0x80
40#define PP_ATOM_POWER_BUDGET_SHOW_WARNING 0x40
41#define PP_ATOM_POWER_BUDGET_SHOW_WAIVER 0x20
42#define PP_ATOM_POWER_POWER_BUDGET_BEHAVIOUR 0x0F
43
44/* New functions for Evergreen and beyond. */
45#define PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES 32
46
/* Decoded PLL divider settings: post, feedback and reference dividers. */
struct pp_atomctrl_clock_dividers {
	uint32_t pll_post_divider;
	uint32_t pll_feedback_divider;
	uint32_t pll_ref_divider;
	bool  enable_post_divider;
};

typedef struct pp_atomctrl_clock_dividers pp_atomctrl_clock_dividers;

/*
 * TCI PLL feedback divider: the packed 32-bit register value overlaid
 * with its bit fields (fractional part, integer part, unused bits).
 * NOTE(review): field order assumes little-endian bitfield layout —
 * confirm if big-endian hosts are ever supported.
 */
union pp_atomctrl_tcipll_fb_divider {
	struct {
		uint32_t ul_fb_div_frac : 14;
		uint32_t ul_fb_div : 12;
		uint32_t un_used : 6;
	};
	uint32_t ul_fb_divider;
};

typedef union pp_atomctrl_tcipll_fb_divider pp_atomctrl_tcipll_fb_divider;

/* RV730-family clock dividers, including dither enable and VCO mode. */
struct pp_atomctrl_clock_dividers_rv730 {
	uint32_t pll_post_divider;
	pp_atomctrl_tcipll_fb_divider mpll_feedback_divider;
	uint32_t pll_ref_divider;
	bool  enable_post_divider;
	bool  enable_dithen;
	uint32_t vco_mode;
};
typedef struct pp_atomctrl_clock_dividers_rv730 pp_atomctrl_clock_dividers_rv730;


/* Kong-family divider query result: post divider plus the achieved clock. */
struct pp_atomctrl_clock_dividers_kong {
	uint32_t    pll_post_divider;
	uint32_t    real_clock;
};
typedef struct pp_atomctrl_clock_dividers_kong pp_atomctrl_clock_dividers_kong;
83
/* CI-family PLL divider query result. */
struct pp_atomctrl_clock_dividers_ci {
	uint32_t pll_post_divider;               /* post divider value */
	uint32_t real_clock;
	pp_atomctrl_tcipll_fb_divider ul_fb_div;  /* Output Parameter: PLL FB divider */
	uint8_t uc_pll_ref_div;                   /* Output Parameter: PLL ref divider */
	uint8_t uc_pll_post_div;                  /* Output Parameter: PLL post divider */
	uint8_t uc_pll_cntl_flag;                 /* Output Flags: control flag */
};
typedef struct pp_atomctrl_clock_dividers_ci pp_atomctrl_clock_dividers_ci;

/* VI-family PLL divider query result (field-for-field identical to CI). */
struct pp_atomctrl_clock_dividers_vi {
	uint32_t pll_post_divider;               /* post divider value */
	uint32_t real_clock;
	pp_atomctrl_tcipll_fb_divider ul_fb_div;  /* Output Parameter: PLL FB divider */
	uint8_t uc_pll_ref_div;                   /* Output Parameter: PLL ref divider */
	uint8_t uc_pll_post_div;                  /* Output Parameter: PLL post divider */
	uint8_t uc_pll_cntl_flag;                 /* Output Flags: control flag */
};
typedef struct pp_atomctrl_clock_dividers_vi pp_atomctrl_clock_dividers_vi;

/*
 * MPLL feedback divider as the packed register value plus bit fields.
 * NOTE(review): field order assumes little-endian bitfield layout.
 */
union pp_atomctrl_s_mpll_fb_divider {
	struct {
		uint32_t cl_kf : 12;
		uint32_t clk_frac : 12;
		uint32_t un_used : 8;
	};
	uint32_t ul_fb_divider;
};
typedef union pp_atomctrl_s_mpll_fb_divider pp_atomctrl_s_mpll_fb_divider;

/* Spread spectrum modulation shape: down-spread or center-spread. */
enum pp_atomctrl_spread_spectrum_mode {
	pp_atomctrl_spread_spectrum_mode_down = 0,
	pp_atomctrl_spread_spectrum_mode_center
};
typedef enum pp_atomctrl_spread_spectrum_mode pp_atomctrl_spread_spectrum_mode;
119
/* Decoded memory PLL (MPLL) programming parameters. */
struct pp_atomctrl_memory_clock_param {
	pp_atomctrl_s_mpll_fb_divider mpll_fb_divider;
	uint32_t mpll_post_divider;
	uint32_t bw_ctrl;
	uint32_t dll_speed;
	uint32_t vco_mode;
	uint32_t yclk_sel;
	uint32_t qdr;
	uint32_t half_rate;
};
typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param;

/* Spread spectrum settings decoded from the ASIC internal SS table. */
struct pp_atomctrl_internal_ss_info {
	uint32_t speed_spectrum_percentage;			/* in 1/100 percentage */
	uint32_t speed_spectrum_rate;				/* in KHz */
	pp_atomctrl_spread_spectrum_mode speed_spectrum_mode;
};
typedef struct pp_atomctrl_internal_ss_info pp_atomctrl_internal_ss_info;
138
#ifndef NUMBER_OF_M3ARB_PARAMS
#define NUMBER_OF_M3ARB_PARAMS 3
#endif

#ifndef NUMBER_OF_M3ARB_PARAM_SETS
#define NUMBER_OF_M3ARB_PARAM_SETS 10
#endif

/* Kong-family system information as read from the VBIOS. */
struct pp_atomctrl_kong_system_info {
	uint32_t	ul_bootup_uma_clock;	/* in 10kHz unit */
	uint16_t	us_max_nb_voltage;	/* high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse */
	uint16_t	us_min_nb_voltage;	/* low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse */
	uint16_t	us_bootup_nb_voltage;	/* boot up NB voltage */
	uint8_t		uc_htc_tmp_lmt;		/* bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD */
	uint8_t		uc_tj_offset;		/* bit [28:22] of D24F3xE4 Thermtrip Status Register, may not be needed, TBD */
	/* 0: default 1: uvd 2: fs-3d */
	uint32_t	ul_csr_m3_srb_cntl[NUMBER_OF_M3ARB_PARAM_SETS][NUMBER_OF_M3ARB_PARAMS];	/* arrays with values for CSR M3 arbiter for default */
};
typedef struct pp_atomctrl_kong_system_info pp_atomctrl_kong_system_info;

/* VRAM vendor/type identification (type uses the MEM_TYPE_* values above). */
struct pp_atomctrl_memory_info {
	uint8_t memory_vendor;
	uint8_t memory_type;
};
typedef struct pp_atomctrl_memory_info pp_atomctrl_memory_info;

#define MAX_AC_TIMING_ENTRIES 16

/* Table of memory clock upper bounds used for AC timing selection. */
struct pp_atomctrl_memory_clock_range_table {
	uint8_t num_entries;
	uint8_t rsv[3];

	uint32_t mclk[MAX_AC_TIMING_ENTRIES];
};
typedef struct pp_atomctrl_memory_clock_range_table pp_atomctrl_memory_clock_range_table;
174
/* One voltage table entry: voltage value plus its SMIO low mask. */
struct pp_atomctrl_voltage_table_entry {
	uint16_t value;
	uint32_t smio_low;
};

typedef struct pp_atomctrl_voltage_table_entry pp_atomctrl_voltage_table_entry;

/* Voltage lookup table decoded from the VBIOS voltage objects. */
struct pp_atomctrl_voltage_table {
	uint32_t count;
	uint32_t mask_low;
	uint32_t phase_delay;	/* Used for ATOM_GPIO_VOLTAGE_OBJECT_V3 and later */
	pp_atomctrl_voltage_table_entry entries[PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES];
};

typedef struct pp_atomctrl_voltage_table pp_atomctrl_voltage_table;
190
#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
#define VBIOS_MAX_AC_TIMING_ENTRIES 20

/* One MC register programming set, valid for clocks up to mclk_max. */
struct pp_atomctrl_mc_reg_entry {
	uint32_t mclk_max;
	uint32_t mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
};
typedef struct pp_atomctrl_mc_reg_entry pp_atomctrl_mc_reg_entry;

/* MC register address plus its pre-register data byte. */
struct pp_atomctrl_mc_register_address {
	uint16_t s1;
	uint8_t uc_pre_reg_data;
};

typedef struct pp_atomctrl_mc_register_address pp_atomctrl_mc_register_address;

/* MC register table: per-mclk register value sets and their addresses. */
struct pp_atomctrl_mc_reg_table {
	uint8_t last;				/* number of registers */
	uint8_t num_entries;			/* number of AC timing entries */
	pp_atomctrl_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
	pp_atomctrl_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
};
typedef struct pp_atomctrl_mc_reg_table pp_atomctrl_mc_reg_table;

/* GPIO pin lookup result: register index and bit shift for the pin. */
struct pp_atomctrl_gpio_pin_assignment {
	uint16_t us_gpio_pin_aindex;
	uint8_t uc_gpio_pin_bit_shift;
};
typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
220
221extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
222extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
223extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
224extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
225extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
226extern int atomctrl_initialize_mc_reg_table(struct pp_hwmgr *hwmgr, uint8_t module_index, pp_atomctrl_mc_reg_table *table);
227extern int atomctrl_set_engine_dram_timings_rv770(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint32_t memory_clock);
228extern uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr);
229extern int atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode);
230extern int atomctrl_get_engine_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
231extern int atomctrl_get_dfs_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
232extern bool atomctrl_is_voltage_controled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode);
233extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
234extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
235 uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
236extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
237 uint32_t clock_value,
238 pp_atomctrl_clock_dividers_kong *dividers);
239extern int atomctrl_read_efuse(void *device, uint16_t start_index,
240 uint16_t end_index, uint32_t mask, uint32_t *efuse);
241extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
242 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
243
244
245#endif
246
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
new file mode 100644
index 000000000000..b7429a527828
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -0,0 +1,612 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <asm/div64.h>
24
#define SHIFT_AMOUNT 16 /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */

#define PRECISION 5 /* Change this value to change the number of decimal places in the final output - 5 is a good default */

#define SHIFTED_2 (2 << SHIFT_AMOUNT)
/* Fully parenthesized so MAX expands safely in any expression context
 * (the old "(1 << 15) - 1" form would mis-bind next to * or -).
 */
#define MAX ((1 << (SHIFT_AMOUNT - 1)) - 1) /* 32767 - Might change in the future */
31
32/* -------------------------------------------------------------------------------
33 * NEW TYPE - fINT
34 * -------------------------------------------------------------------------------
35 * A variable of type fInt can be accessed in 3 ways using the dot (.) operator
36 * fInt A;
37 * A.full => The full number as it is. Generally not easy to read
38 * A.partial.real => Only the integer portion
39 * A.partial.decimal => Only the fractional portion
40 */
41typedef union _fInt {
42 int full;
43 struct _partial {
44 unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/
45 int real: 32 - SHIFT_AMOUNT;
46 } partial;
47} fInt;
48
49/* -------------------------------------------------------------------------------
50 * Function Declarations
51 * -------------------------------------------------------------------------------
52 */
53fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
54fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */
55fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */
56int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
57
58fInt fNegate(fInt); /* Returns -1 * input fInt value */
59fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
60fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
61fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
62fInt fDivide (fInt A, fInt B); /* Returns A/B */
63fInt fGetSquare(fInt); /* Returns the square of a fInt number */
64fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
65
66int uAbs(int); /* Returns the Absolute value of the Int */
67fInt fAbs(fInt); /* Returns the Absolute value of the fInt */
68int uPow(int base, int exponent); /* Returns base^exponent an INT */
69
70void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
71bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
72bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
73
74fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
75fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
76
77/* Fuse decoding functions
78 * -------------------------------------------------------------------------------------
79 */
80fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
81fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
82fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
83
84/* Internal Support Functions - Use these ONLY for testing or adding to internal functions
85 * -------------------------------------------------------------------------------------
86 * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
87 */
88fInt Add (int, int); /* Add two INTs and return Sum as FINT */
89fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */
90fInt Divide (int, int); /* You get the idea... */
91fInt fNegate(fInt);
92
93int uGetScaledDecimal (fInt); /* Internal function */
94int GetReal (fInt A); /* Internal function */
95
96/* Future Additions and Incomplete Functions
97 * -------------------------------------------------------------------------------------
98 */
99int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */
100 /* Let us say we have 2.126 but can only handle 2 decimal points. We could */
					/* either chop off the 6 and keep 2.12 or use this function to get 2.13, which is more accurate */
102
103/* -------------------------------------------------------------------------------------
104 * TROUBLESHOOTING INFORMATION
105 * -------------------------------------------------------------------------------------
106 * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767)
107 * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767)
108 * 3) fMultiply - OutputOutOfRangeException:
109 * 4) fGetSquare - OutputOutOfRangeException:
110 * 5) fDivide - DivideByZeroException
111 * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number
112 */
113
114/* -------------------------------------------------------------------------------------
115 * START OF CODE
116 * -------------------------------------------------------------------------------------
117 */
/*
 * fExponential - fixed-point e^exponent.
 *
 * Argument reduction: k_array[i]/10000 == ln(expk_array[i]/10000)
 * (e.g. 55452 -> ln(256), 27726 -> ln(16)). While the exponent exceeds
 * the lower bound (0.0078), subtract a known logarithm and multiply the
 * running solution by the matching power of e. The small residual is then
 * handled with the first-order approximation e^x ~= 1 + x.
 * Negative exponents are computed as 1 / e^|exponent|.
 */
fInt fExponential(fInt exponent)        /*Can be used to calculate e^exponent*/
{
	uint32_t i;
	bool bNegated = false;

	fInt fPositiveOne = ConvertToFraction(1);
	fInt fZERO = ConvertToFraction(0);

	fInt lower_bound = Divide(78, 10000);
	fInt solution = fPositiveOne; /*Starting off with baseline of 1 */
	fInt error_term;

	/* ln(k) values scaled by 10^4, largest first */
	uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
	/* matching e^(k_array[i]/10^4) values, also scaled by 10^4 */
	uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};

	/* Reduce to a positive exponent; invert the result at the end. */
	if (GreaterThan(fZERO, exponent)) {
		exponent = fNegate(exponent);
		bNegated = true;
	}

	/* Peel off known logarithms until the residual is <= 0.0078. */
	while (GreaterThan(exponent, lower_bound)) {
		for (i = 0; i < 11; i++) {
			if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) {
				exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000));
				solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000));
			}
		}
	}

	/* e^residual ~= 1 + residual for the small remaining exponent */
	error_term = fAdd(fPositiveOne, exponent);

	solution = fMultiply(solution, error_term);

	if (bNegated)
		solution = fDivide(fPositiveOne, solution);

	return solution;
}
156
/*
 * fNaturalLog - fixed-point ln(value).
 *
 * Argument reduction: logk_array[i]/10000 == ln(k_array[i]/10000)
 * (e.g. 27726 -> ln(16)). While value is more than 0.008 above 1,
 * divide out a known constant and accumulate its logarithm. The small
 * residual is handled with the first-order approximation
 * ln(v) ~= v - 1 near v == 1.
 * NOTE(review): assumes value >= 1 on entry; values below 1 skip the
 * reduction loop and rely solely on the linear approximation.
 */
fInt fNaturalLog(fInt value)
{
	uint32_t i;
	fInt upper_bound = Divide(8, 1000);
	fInt fNegativeOne = ConvertToFraction(-1);
	fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */
	fInt error_term;

	/* reduction constants scaled by 10^4, largest first */
	uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
	/* ln(k_array[i]/10^4) values, also scaled by 10^4 */
	uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};

	/* Loop while (value - 1) > 0.008 */
	while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) {
		for (i = 0; i < 10; i++) {
			if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) {
				value = fDivide(value, GetScaledFraction(k_array[i], 10000));
				solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000));
			}
		}
	}

	/* ln(residual) ~= residual - 1 */
	error_term = fAdd(fNegativeOne, value);

	return (fAdd(solution, error_term));
}
181
182fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
183{
184 fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
185 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
186
187 fInt f_decoded_value;
188
189 f_decoded_value = fDivide(f_fuse_value, f_bit_max_value);
190 f_decoded_value = fMultiply(f_decoded_value, f_range);
191 f_decoded_value = fAdd(f_decoded_value, f_min);
192
193 return f_decoded_value;
194}
195
196
197fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
198{
199 fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
200 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
201
202 fInt f_CONSTANT_NEG13 = ConvertToFraction(-13);
203 fInt f_CONSTANT1 = ConvertToFraction(1);
204
205 fInt f_decoded_value;
206
207 f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1);
208 f_decoded_value = fNaturalLog(f_decoded_value);
209 f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13));
210 f_decoded_value = fAdd(f_decoded_value, f_average);
211
212 return f_decoded_value;
213}
214
215fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
216{
217 fInt fLeakage;
218 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
219
220 fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse));
221 fLeakage = fDivide(fLeakage, f_bit_max_value);
222 fLeakage = fExponential(fLeakage);
223 fLeakage = fMultiply(fLeakage, f_min);
224
225 return fLeakage;
226}
227
228fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
229{
230 fInt temp;
231
232 if (X <= MAX)
233 temp.full = (X << SHIFT_AMOUNT);
234 else
235 temp.full = 0;
236
237 return temp;
238}
239
240fInt fNegate(fInt X)
241{
242 fInt CONSTANT_NEGONE = ConvertToFraction(-1);
243 return (fMultiply(X, CONSTANT_NEGONE));
244}
245
246fInt Convert_ULONG_ToFraction(uint32_t X)
247{
248 fInt temp;
249
250 if (X <= MAX)
251 temp.full = (X << SHIFT_AMOUNT);
252 else
253 temp.full = 0;
254
255 return temp;
256}
257
258fInt GetScaledFraction(int X, int factor)
259{
260 int times_shifted, factor_shifted;
261 bool bNEGATED;
262 fInt fValue;
263
264 times_shifted = 0;
265 factor_shifted = 0;
266 bNEGATED = false;
267
268 if (X < 0) {
269 X = -1*X;
270 bNEGATED = true;
271 }
272
273 if (factor < 0) {
274 factor = -1*factor;
275 bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */
276 }
277
278 if ((X > MAX) || factor > MAX) {
279 if ((X/factor) <= MAX) {
280 while (X > MAX) {
281 X = X >> 1;
282 times_shifted++;
283 }
284
285 while (factor > MAX) {
286 factor = factor >> 1;
287 factor_shifted++;
288 }
289 } else {
290 fValue.full = 0;
291 return fValue;
292 }
293 }
294
295 if (factor == 1)
296 return (ConvertToFraction(X));
297
298 fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor));
299
300 fValue.full = fValue.full << times_shifted;
301 fValue.full = fValue.full >> factor_shifted;
302
303 return fValue;
304}
305
306/* Addition using two fInts */
307fInt fAdd (fInt X, fInt Y)
308{
309 fInt Sum;
310
311 Sum.full = X.full + Y.full;
312
313 return Sum;
314}
315
/* Subtraction using two fInts (X - Y); the old comment wrongly said "Addition". */
fInt fSubtract (fInt X, fInt Y)
{
	fInt Difference;

	Difference.full = X.full - Y.full;

	return Difference;
}
325
326bool Equal(fInt A, fInt B)
327{
328 if (A.full == B.full)
329 return true;
330 else
331 return false;
332}
333
334bool GreaterThan(fInt A, fInt B)
335{
336 if (A.full > B.full)
337 return true;
338 else
339 return false;
340}
341
342fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
343{
344 fInt Product;
345 int64_t tempProduct;
346 bool X_LessThanOne, Y_LessThanOne;
347
348 X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0);
349 Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0);
350
351 /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/
352 /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION
353
354 if (X_LessThanOne && Y_LessThanOne) {
355 Product.full = X.full * Y.full;
356 return Product
357 }*/
358
359 tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! */
360 tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */
361 Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */
362
363 return Product;
364}
365
366fInt fDivide (fInt X, fInt Y)
367{
368 fInt fZERO, fQuotient;
369 int64_t longlongX, longlongY;
370
371 fZERO = ConvertToFraction(0);
372
373 if (Equal(Y, fZERO))
374 return fZERO;
375
376 longlongX = (int64_t)X.full;
377 longlongY = (int64_t)Y.full;
378
379 longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */
380
381 div64_s64(longlongX, longlongY); /*Q(32,32) divided by Q(16,16) = Q(16,16) Back to original format */
382
383 fQuotient.full = (int)longlongX;
384 return fQuotient;
385}
386
/*
 * Flatten a Q(16,16) value into one scaled integer for comparison against
 * the golden-settings table: the integer part is multiplied by
 * 10^(PRECISION-1) and the fractional part is appended as decimal digits
 * via uGetScaledDecimal().
 */
int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
{
	fInt fullNumber, scaledDecimal, scaledReal;

	/* NOTE(review): original author flagged this scaling for re-checking. */
	scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */

	scaledDecimal.full = uGetScaledDecimal(A);

	fullNumber = fAdd(scaledDecimal,scaledReal);

	return fullNumber.full;
}
399
400fInt fGetSquare(fInt A)
401{
402 return fMultiply(A,A);
403}
404
405/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
406fInt fSqrt(fInt num)
407{
408 fInt F_divide_Fprime, Fprime;
409 fInt test;
410 fInt twoShifted;
411 int seed, counter, error;
412 fInt x_new, x_old, C, y;
413
414 fInt fZERO = ConvertToFraction(0);
415
416 /* (0 > num) is the same as (num < 0), i.e., num is negative */
417
418 if (GreaterThan(fZERO, num) || Equal(fZERO, num))
419 return fZERO;
420
421 C = num;
422
423 if (num.partial.real > 3000)
424 seed = 60;
425 else if (num.partial.real > 1000)
426 seed = 30;
427 else if (num.partial.real > 100)
428 seed = 10;
429 else
430 seed = 2;
431
432 counter = 0;
433
434 if (Equal(num, fZERO)) /*Square Root of Zero is zero */
435 return fZERO;
436
437 twoShifted = ConvertToFraction(2);
438 x_new = ConvertToFraction(seed);
439
440 do {
441 counter++;
442
443 x_old.full = x_new.full;
444
445 test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */
446 y = fSubtract(test, C); /*y = f(x) = x^2 - C; */
447
448 Fprime = fMultiply(twoShifted, x_old);
449 F_divide_Fprime = fDivide(y, Fprime);
450
451 x_new = fSubtract(x_old, F_divide_Fprime);
452
453 error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old);
454
455 if (counter > 20) /*20 is already way too many iterations. If we dont have an answer by then, we never will*/
456 return x_new;
457
458 } while (uAbs(error) > 0);
459
460 return (x_new);
461}
462
463void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
464{
465 fInt *pRoots = &Roots[0];
466 fInt temp, root_first, root_second;
467 fInt f_CONSTANT10, f_CONSTANT100;
468
469 f_CONSTANT100 = ConvertToFraction(100);
470 f_CONSTANT10 = ConvertToFraction(10);
471
472 while(GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) {
473 A = fDivide(A, f_CONSTANT10);
474 B = fDivide(B, f_CONSTANT10);
475 C = fDivide(C, f_CONSTANT10);
476 }
477
478 temp = fMultiply(ConvertToFraction(4), A); /* root = 4*A */
479 temp = fMultiply(temp, C); /* root = 4*A*C */
480 temp = fSubtract(fGetSquare(B), temp); /* root = b^2 - 4AC */
481 temp = fSqrt(temp); /*root = Sqrt (b^2 - 4AC); */
482
483 root_first = fSubtract(fNegate(B), temp); /* b - Sqrt(b^2 - 4AC) */
484 root_second = fAdd(fNegate(B), temp); /* b + Sqrt(b^2 - 4AC) */
485
486 root_first = fDivide(root_first, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */
487 root_first = fDivide(root_first, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */
488
489 root_second = fDivide(root_second, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */
490 root_second = fDivide(root_second, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */
491
492 *(pRoots + 0) = root_first;
493 *(pRoots + 1) = root_second;
494}
495
496/* -----------------------------------------------------------------------------
497 * SUPPORT FUNCTIONS
498 * -----------------------------------------------------------------------------
499 */
500
501/* Addition using two normal ints - Temporary - Use only for testing purposes?. */
502fInt Add (int X, int Y)
503{
504 fInt A, B, Sum;
505
506 A.full = (X << SHIFT_AMOUNT);
507 B.full = (Y << SHIFT_AMOUNT);
508
509 Sum.full = A.full + B.full;
510
511 return Sum;
512}
513
514/* Conversion Functions */
515int GetReal (fInt A)
516{
517 return (A.full >> SHIFT_AMOUNT);
518}
519
/* Temporarily Disabled - currently truncates instead of rounding. */
int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */
{
	/* ROUNDING TEMPORARLY DISABLED
	int temp = A.full;
	int decimal_cutoff, decimal_mask = 0x000001FF;
	decimal_cutoff = temp & decimal_mask;
	if (decimal_cutoff > 0x147) {
		temp += 673;
	}*/

	/* Truncating fallback kept so old callers still get a value. */
	return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */
}
533
534fInt Multiply (int X, int Y)
535{
536 fInt A, B, Product;
537
538 A.full = X << SHIFT_AMOUNT;
539 B.full = Y << SHIFT_AMOUNT;
540
541 Product = fMultiply(A, B);
542
543 return Product;
544}
545
546fInt Divide (int X, int Y)
547{
548 fInt A, B, Quotient;
549
550 A.full = X << SHIFT_AMOUNT;
551 B.full = Y << SHIFT_AMOUNT;
552
553 Quotient = fDivide(A, B);
554
555 return Quotient;
556}
557
/*
 * Convert the fractional bits of A into a PRECISION-digit decimal
 * integer by long division: each round extracts one decimal digit and
 * multiplies the remainder by 10.  Costly - avoid in hot paths.
 */
int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
{
	int dec[PRECISION];
	int i, scaledDecimal = 0, tmp = A.partial.decimal;

	for (i = 0; i < PRECISION; i++) {
		dec[i] = tmp / (1 << SHIFT_AMOUNT);       /* next decimal digit */
		tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]); /* remainder */
		tmp *= 10;
		scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 -i);
	}

	return scaledDecimal;
}
572
/*
 * Integer exponentiation: base^power.
 * Fix: the old recursive form only terminated at power == 0, so any
 * negative power recursed without bound (stack overflow).  The iterative
 * form treats power <= 0 as 1 and needs no stack growth.
 */
int uPow(int base, int power)
{
	int result = 1;

	while (power > 0) {
		result *= base;
		power--;
	}

	return result;
}
580
581fInt fAbs(fInt A)
582{
583 if (A.partial.real < 0)
584 return (fMultiply(A, ConvertToFraction(-1)));
585 else
586 return A;
587}
588
/* Absolute value of a plain int. */
int uAbs(int X)
{
	return (X < 0) ? -X : X;
}
596
/*
 * Round A up to a multiple of fStepSize: truncate A/fStepSize, optionally
 * add 1 (error_term), rescale, then add one more step.
 * NOTE(review): the final fAdd means the result is always at least one
 * full step above the truncated multiple - confirm callers expect that.
 */
fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
{
	fInt solution;

	solution = fDivide(A, fStepSize);
	solution.partial.decimal = 0; /*All fractional digits changes to 0 */

	if (error_term)
		solution.partial.real += 1; /*Error term of 1 added */

	solution = fMultiply(solution, fStepSize);
	solution = fAdd(solution, fStepSize);

	return solution;
}
612
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c
new file mode 100644
index 000000000000..186496a34cbe
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/types.h>
25#include "atom-types.h"
26#include "atombios.h"
27#include "pppcielanes.h"
28
29/** \file
30 * Functions related to PCIe lane changes.
31 */
32
/* Lane count -> register encoding; entries for unsupported widths are 0. */
static const unsigned char pp_r600_encode_lanes[] = {
	0,		/*  0: not supported */
	1,		/*  1 lane  */
	2,		/*  2 lanes */
	0,		/*  3: not supported */
	3,		/*  4 lanes */
	0, 0, 0,	/*  5-7: not supported */
	4,		/*  8 lanes */
	0, 0, 0,	/*  9-11: not supported */
	5,		/* 12 lanes (not actually supported) */
	0, 0, 0,	/* 13-15: not supported */
	6		/* 16 lanes */
};

/* Register encoding -> lane count (index 0 decodes as 16). */
static const unsigned char pp_r600_decoded_lanes[8] = { 16, 1, 2, 4, 8, 12, 16, };

/* Map a PCIe lane count onto its register encoding. */
uint8_t encode_pcie_lane_width(uint32_t num_lanes)
{
	return pp_r600_encode_lanes[num_lanes];
}

/* Map a register encoding back onto a PCIe lane count. */
uint8_t decode_pcie_lane_width(uint32_t num_lanes)
{
	return pp_r600_decoded_lanes[num_lanes];
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h
new file mode 100644
index 000000000000..70b163b35570
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef PP_PCIELANES_H
25#define PP_PCIELANES_H
26
27extern uint8_t encode_pcie_lane_width(uint32_t num_lanes);
28extern uint8_t decode_pcie_lane_width(uint32_t num_lanes);
29
30#endif
31
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
new file mode 100644
index 000000000000..2f1a14fe05b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -0,0 +1,1688 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26
27#include "processpptables.h"
28#include <atom-types.h>
29#include <atombios.h>
30#include "pp_debug.h"
31#include "pptable.h"
32#include "power_state.h"
33#include "hwmgr.h"
34#include "hardwaremanager.h"
35
36
37#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
38#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
39#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
40#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
41#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
42#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
43#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
44#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
45
46#define NUM_BITS_CLOCK_INFO_ARRAY_INDEX 6
47
/*
 * Offset of the VCE table inside the powerplay table, or 0 when the
 * table predates v3 or its extended header is smaller than V2.
 */
static uint16_t get_vce_table_offset(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t vce_table_offset = 0;

	if (le16_to_cpu(powerplay_table->usTableSize) >=
	    sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
		const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
			(const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;

		if (powerplay_table3->usExtendendedHeaderOffset > 0) {
			const ATOM_PPLIB_EXTENDEDHEADER *extended_header =
				(const ATOM_PPLIB_EXTENDEDHEADER *)
				(((unsigned long)powerplay_table3) +
				le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset));
			if (le16_to_cpu(extended_header->usSize) >=
			    SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2)
				vce_table_offset = le16_to_cpu(extended_header->usVCETableOffset);
		}
	}

	return vce_table_offset;
}
71
72static uint16_t get_vce_clock_info_array_offset(struct pp_hwmgr *hwmgr,
73 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
74{
75 uint16_t table_offset = get_vce_table_offset(hwmgr,
76 powerplay_table);
77
78 if (table_offset > 0)
79 return table_offset + 1;
80
81 return 0;
82}
83
/* Byte size of the VCE clock-info array: count byte + entries; 0 if absent. */
static uint16_t get_vce_clock_info_array_size(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t table_offset = get_vce_clock_info_array_offset(hwmgr,
						powerplay_table);
	uint16_t table_size = 0;

	if (table_offset > 0) {
		const VCEClockInfoArray *p = (const VCEClockInfoArray *)
			(((unsigned long) powerplay_table) + table_offset);
		table_size = sizeof(uint8_t) + p->ucNumEntries * sizeof(VCEClockInfo);
	}

	return table_size;
}
99
100static uint16_t get_vce_clock_voltage_limit_table_offset(struct pp_hwmgr *hwmgr,
101 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
102{
103 uint16_t table_offset = get_vce_clock_info_array_offset(hwmgr,
104 powerplay_table);
105
106 if (table_offset > 0)
107 return table_offset + get_vce_clock_info_array_size(hwmgr,
108 powerplay_table);
109
110 return 0;
111}
112
/* Byte size of the VCE clock-voltage-limit table: count byte + records. */
static uint16_t get_vce_clock_voltage_limit_table_size(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t table_offset = get_vce_clock_voltage_limit_table_offset(hwmgr, powerplay_table);
	uint16_t table_size = 0;

	if (table_offset > 0) {
		const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *ptable =
			(const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)(((unsigned long) powerplay_table) + table_offset);

		table_size = sizeof(uint8_t) + ptable->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record);
	}
	return table_size;
}
127
128static uint16_t get_vce_state_table_offset(struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
129{
130 uint16_t table_offset = get_vce_clock_voltage_limit_table_offset(hwmgr, powerplay_table);
131
132 if (table_offset > 0)
133 return table_offset + get_vce_clock_voltage_limit_table_size(hwmgr, powerplay_table);
134
135 return 0;
136}
137
138static const ATOM_PPLIB_VCE_State_Table *get_vce_state_table(
139 struct pp_hwmgr *hwmgr,
140 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
141{
142 uint16_t table_offset = get_vce_state_table_offset(hwmgr, powerplay_table);
143
144 if (table_offset > 0)
145 return (const ATOM_PPLIB_VCE_State_Table *)(((unsigned long) powerplay_table) + table_offset);
146
147 return NULL;
148}
149
/* Offset of the UVD table, or 0 when the extended header (< V3) lacks it. */
static uint16_t get_uvd_table_offset(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t uvd_table_offset = 0;

	if (le16_to_cpu(powerplay_table->usTableSize) >=
	    sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
		const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
			(const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
		if (powerplay_table3->usExtendendedHeaderOffset > 0) {
			const ATOM_PPLIB_EXTENDEDHEADER *extended_header =
				(const ATOM_PPLIB_EXTENDEDHEADER *)
				(((unsigned long)powerplay_table3) +
				le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset));
			if (le16_to_cpu(extended_header->usSize) >=
			    SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3)
				uvd_table_offset = le16_to_cpu(extended_header->usUVDTableOffset);
		}
	}
	return uvd_table_offset;
}
171
172static uint16_t get_uvd_clock_info_array_offset(struct pp_hwmgr *hwmgr,
173 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
174{
175 uint16_t table_offset = get_uvd_table_offset(hwmgr,
176 powerplay_table);
177
178 if (table_offset > 0)
179 return table_offset + 1;
180 return 0;
181}
182
/* Byte size of the UVD clock-info array: count byte + entries; 0 if absent. */
static uint16_t get_uvd_clock_info_array_size(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t table_offset = get_uvd_clock_info_array_offset(hwmgr,
						powerplay_table);
	uint16_t table_size = 0;

	if (table_offset > 0) {
		const UVDClockInfoArray *p = (const UVDClockInfoArray *)
					(((unsigned long) powerplay_table)
					+ table_offset);
		table_size = sizeof(UCHAR) +
			     p->ucNumEntries * sizeof(UVDClockInfo);
	}

	return table_size;
}
200
201static uint16_t get_uvd_clock_voltage_limit_table_offset(
202 struct pp_hwmgr *hwmgr,
203 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
204{
205 uint16_t table_offset = get_uvd_clock_info_array_offset(hwmgr,
206 powerplay_table);
207
208 if (table_offset > 0)
209 return table_offset +
210 get_uvd_clock_info_array_size(hwmgr, powerplay_table);
211
212 return 0;
213}
214
/* Offset of the SAMU table, or 0 when the extended header (< V4) lacks it. */
static uint16_t get_samu_table_offset(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t samu_table_offset = 0;

	if (le16_to_cpu(powerplay_table->usTableSize) >=
	    sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
		const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
			(const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
		if (powerplay_table3->usExtendendedHeaderOffset > 0) {
			const ATOM_PPLIB_EXTENDEDHEADER *extended_header =
				(const ATOM_PPLIB_EXTENDEDHEADER *)
				(((unsigned long)powerplay_table3) +
				le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset));
			if (le16_to_cpu(extended_header->usSize) >=
			    SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4)
				samu_table_offset = le16_to_cpu(extended_header->usSAMUTableOffset);
		}
	}

	return samu_table_offset;
}
237
238static uint16_t get_samu_clock_voltage_limit_table_offset(
239 struct pp_hwmgr *hwmgr,
240 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
241{
242 uint16_t table_offset = get_samu_table_offset(hwmgr,
243 powerplay_table);
244
245 if (table_offset > 0)
246 return table_offset + 1;
247
248 return 0;
249}
250
/* Offset of the ACP table, or 0 when the extended header (< V6) lacks it. */
static uint16_t get_acp_table_offset(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t acp_table_offset = 0;

	if (le16_to_cpu(powerplay_table->usTableSize) >=
	    sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
		const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
			(const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
		if (powerplay_table3->usExtendendedHeaderOffset > 0) {
			const ATOM_PPLIB_EXTENDEDHEADER *pExtendedHeader =
				(const ATOM_PPLIB_EXTENDEDHEADER *)
				(((unsigned long)powerplay_table3) +
				le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset));
			if (le16_to_cpu(pExtendedHeader->usSize) >=
			    SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6)
				acp_table_offset = le16_to_cpu(pExtendedHeader->usACPTableOffset);
		}
	}

	return acp_table_offset;
}
273
274static uint16_t get_acp_clock_voltage_limit_table_offset(
275 struct pp_hwmgr *hwmgr,
276 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
277{
278 uint16_t tableOffset = get_acp_table_offset(hwmgr, powerplay_table);
279
280 if (tableOffset > 0)
281 return tableOffset + 1;
282
283 return 0;
284}
285
/*
 * Offset of the CAC/TDP (PowerTune) table, or 0 when the extended header
 * (< V7) does not carry one.
 */
static uint16_t get_cacp_tdp_table_offset(
		struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t cacTdpTableOffset = 0;

	if (le16_to_cpu(powerplay_table->usTableSize) >=
	    sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
		const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
			(const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
		if (powerplay_table3->usExtendendedHeaderOffset > 0) {
			const ATOM_PPLIB_EXTENDEDHEADER *pExtendedHeader =
				(const ATOM_PPLIB_EXTENDEDHEADER *)
				(((unsigned long)powerplay_table3) +
				le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset));
			if (le16_to_cpu(pExtendedHeader->usSize) >=
			    SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7)
				cacTdpTableOffset = le16_to_cpu(pExtendedHeader->usPowerTuneTableOffset);
		}
	}

	return cacTdpTableOffset;
}
309
/*
 * Allocate and fill a phm_cac_tdp_table from the ATOM PowerTune table,
 * byte-swapping every little-endian field.  The maximum-power-delivery
 * limit is supplied by the caller and stored without a byte swap.
 * On success *ptable owns the allocation (caller frees); -ENOMEM on
 * allocation failure.
 */
static int get_cac_tdp_table(struct pp_hwmgr *hwmgr,
		struct phm_cac_tdp_table **ptable,
		const ATOM_PowerTune_Table *table,
		uint16_t us_maximum_power_delivery_limit)
{
	unsigned long table_size;
	struct phm_cac_tdp_table *tdp_table;

	/* NOTE(review): the extra sizeof(unsigned long) mirrors the other
	 * table allocations in this file; appears to be padding only. */
	table_size = sizeof(unsigned long) + sizeof(struct phm_cac_tdp_table);

	tdp_table = kzalloc(table_size, GFP_KERNEL);
	if (NULL == tdp_table)
		return -ENOMEM;

	tdp_table->usTDP = le16_to_cpu(table->usTDP);
	tdp_table->usConfigurableTDP = le16_to_cpu(table->usConfigurableTDP);
	tdp_table->usTDC = le16_to_cpu(table->usTDC);
	tdp_table->usBatteryPowerLimit = le16_to_cpu(table->usBatteryPowerLimit);
	tdp_table->usSmallPowerLimit = le16_to_cpu(table->usSmallPowerLimit);
	tdp_table->usLowCACLeakage = le16_to_cpu(table->usLowCACLeakage);
	tdp_table->usHighCACLeakage = le16_to_cpu(table->usHighCACLeakage);
	tdp_table->usMaximumPowerDeliveryLimit = us_maximum_power_delivery_limit;

	*ptable = tdp_table;

	return 0;
}
337
/*
 * Offset of the SCLK/Vddgfx table, or 0 when the extended header (< V8)
 * does not carry one.
 */
static uint16_t get_sclk_vdd_gfx_table_offset(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	uint16_t sclk_vdd_gfx_table_offset = 0;

	if (le16_to_cpu(powerplay_table->usTableSize) >=
	    sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
		const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
			(const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
		if (powerplay_table3->usExtendendedHeaderOffset > 0) {
			const ATOM_PPLIB_EXTENDEDHEADER *pExtendedHeader =
				(const ATOM_PPLIB_EXTENDEDHEADER *)
				(((unsigned long)powerplay_table3) +
				le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset));
			if (le16_to_cpu(pExtendedHeader->usSize) >=
			    SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8)
				sclk_vdd_gfx_table_offset =
					le16_to_cpu(pExtendedHeader->usSclkVddgfxTableOffset);
		}
	}

	return sclk_vdd_gfx_table_offset;
}
361
362static uint16_t get_sclk_vdd_gfx_clock_voltage_dependency_table_offset(
363 struct pp_hwmgr *hwmgr,
364 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
365{
366 uint16_t tableOffset = get_sclk_vdd_gfx_table_offset(hwmgr, powerplay_table);
367
368 if (tableOffset > 0)
369 return tableOffset;
370
371 return 0;
372}
373
374
/*
 * Build a phm_clock_voltage_dependency_table from the ATOM dependency
 * table: each entry recombines the split clock-high byte / clock-low
 * word into one clock value and copies the voltage.
 * On success *ptable owns the allocation (caller frees); -ENOMEM on
 * allocation failure.
 */
static int get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
		struct phm_clock_voltage_dependency_table **ptable,
		const ATOM_PPLIB_Clock_Voltage_Dependency_Table *table)
{

	unsigned long table_size, i;
	struct phm_clock_voltage_dependency_table *dep_table;

	/* NOTE(review): this sizes each entry as a whole dependency *table*
	 * rather than one record - harmless over-allocation, but worth
	 * confirming against the phm struct definitions. */
	table_size = sizeof(unsigned long) +
		sizeof(struct phm_clock_voltage_dependency_table)
		* table->ucNumEntries;

	dep_table = kzalloc(table_size, GFP_KERNEL);
	if (NULL == dep_table)
		return -ENOMEM;

	dep_table->count = (unsigned long)table->ucNumEntries;

	for (i = 0; i < dep_table->count; i++) {
		/* clock = high byte << 16 | low word (stored split in ATOM) */
		dep_table->entries[i].clk =
			((unsigned long)table->entries[i].ucClockHigh << 16) |
			le16_to_cpu(table->entries[i].usClockLow);
		dep_table->entries[i].v =
			(unsigned long)le16_to_cpu(table->entries[i].usVoltage);
	}

	*ptable = dep_table;

	return 0;
}
405
/*
 * Extract just the clock values from a clock-voltage dependency table
 * into a freshly allocated phm_clock_array.  On success *ptable owns the
 * allocation (caller frees); -ENOMEM on allocation failure.
 */
static int get_valid_clk(struct pp_hwmgr *hwmgr,
		struct phm_clock_array **ptable,
		const struct phm_clock_voltage_dependency_table *table)
{
	unsigned long table_size, i;
	struct phm_clock_array *clock_table;

	/* count field plus one unsigned long per clock entry */
	table_size = sizeof(unsigned long) + sizeof(unsigned long) * table->count;
	clock_table = kzalloc(table_size, GFP_KERNEL);
	if (NULL == clock_table)
		return -ENOMEM;

	clock_table->count = (unsigned long)table->count;

	for (i = 0; i < clock_table->count; i++)
		clock_table->values[i] = (unsigned long)table->entries[i].clk;

	*ptable = clock_table;

	return 0;
}
427
/*
 * Read the first entry of the ATOM clock-voltage limit table into
 * @limits, recombining the split high-byte/low-word clock encodings and
 * byte-swapping the little-endian voltage fields.
 */
static int get_clock_voltage_limit(struct pp_hwmgr *hwmgr,
		struct phm_clock_and_voltage_limits *limits,
		const ATOM_PPLIB_Clock_Voltage_Limit_Table *table)
{
	limits->sclk = ((unsigned long)table->entries[0].ucSclkHigh << 16) |
			le16_to_cpu(table->entries[0].usSclkLow);
	limits->mclk = ((unsigned long)table->entries[0].ucMclkHigh << 16) |
			le16_to_cpu(table->entries[0].usMclkLow);
	limits->vddc = (unsigned long)le16_to_cpu(table->entries[0].usVddc);
	limits->vddci = (unsigned long)le16_to_cpu(table->entries[0].usVddci);

	return 0;
}
441
442
443static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
444 enum phm_platform_caps cap)
445{
446 if (enable)
447 phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
448 else
449 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
450}
451
452static int set_platform_caps(struct pp_hwmgr *hwmgr,
453 unsigned long powerplay_caps)
454{
455 set_hw_cap(
456 hwmgr,
457 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_POWERPLAY),
458 PHM_PlatformCaps_PowerPlaySupport
459 );
460
461 set_hw_cap(
462 hwmgr,
463 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
464 PHM_PlatformCaps_BiosPowerSourceControl
465 );
466
467 set_hw_cap(
468 hwmgr,
469 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s),
470 PHM_PlatformCaps_EnableASPML0s
471 );
472
473 set_hw_cap(
474 hwmgr,
475 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1),
476 PHM_PlatformCaps_EnableASPML1
477 );
478
479 set_hw_cap(
480 hwmgr,
481 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS),
482 PHM_PlatformCaps_EnableBackbias
483 );
484
485 set_hw_cap(
486 hwmgr,
487 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC),
488 PHM_PlatformCaps_AutomaticDCTransition
489 );
490
491 set_hw_cap(
492 hwmgr,
493 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY),
494 PHM_PlatformCaps_GeminiPrimary
495 );
496
497 set_hw_cap(
498 hwmgr,
499 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC),
500 PHM_PlatformCaps_StepVddc
501 );
502
503 set_hw_cap(
504 hwmgr,
505 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL),
506 PHM_PlatformCaps_EnableVoltageControl
507 );
508
509 set_hw_cap(
510 hwmgr,
511 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL),
512 PHM_PlatformCaps_EnableSideportControl
513 );
514
515 set_hw_cap(
516 hwmgr,
517 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1),
518 PHM_PlatformCaps_TurnOffPll_ASPML1
519 );
520
521 set_hw_cap(
522 hwmgr,
523 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_HTLINKCONTROL),
524 PHM_PlatformCaps_EnableHTLinkControl
525 );
526
527 set_hw_cap(
528 hwmgr,
529 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL),
530 PHM_PlatformCaps_EnableMVDDControl
531 );
532
533 set_hw_cap(
534 hwmgr,
535 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL),
536 PHM_PlatformCaps_ControlVDDCI
537 );
538
539 set_hw_cap(
540 hwmgr,
541 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT),
542 PHM_PlatformCaps_RegulatorHot
543 );
544
545 set_hw_cap(
546 hwmgr,
547 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT),
548 PHM_PlatformCaps_BootStateOnAlert
549 );
550
551 set_hw_cap(
552 hwmgr,
553 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT),
554 PHM_PlatformCaps_DontWaitForVBlankOnAlert
555 );
556
557 set_hw_cap(
558 hwmgr,
559 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_BACO),
560 PHM_PlatformCaps_BACO
561 );
562
563 set_hw_cap(
564 hwmgr,
565 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE),
566 PHM_PlatformCaps_NewCACVoltage
567 );
568
569 set_hw_cap(
570 hwmgr,
571 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY),
572 PHM_PlatformCaps_RevertGPIO5Polarity
573 );
574
575 set_hw_cap(
576 hwmgr,
577 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17),
578 PHM_PlatformCaps_Thermal2GPIO17
579 );
580
581 set_hw_cap(
582 hwmgr,
583 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE),
584 PHM_PlatformCaps_VRHotGPIOConfigurable
585 );
586
587 set_hw_cap(
588 hwmgr,
589 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_TEMP_INVERSION),
590 PHM_PlatformCaps_TempInversion
591 );
592
593 set_hw_cap(
594 hwmgr,
595 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_EVV),
596 PHM_PlatformCaps_EVV
597 );
598
599 set_hw_cap(
600 hwmgr,
601 0 != (powerplay_caps & ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL),
602 PHM_PlatformCaps_CombinePCCWithThermalSignal
603 );
604
605 set_hw_cap(
606 hwmgr,
607 0 != (powerplay_caps & ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE),
608 PHM_PlatformCaps_LoadPostProductionFirmware
609 );
610
611 set_hw_cap(
612 hwmgr,
613 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC),
614 PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc
615 );
616
617 return 0;
618}
619
620static PP_StateClassificationFlags make_classification_flags(
621 struct pp_hwmgr *hwmgr,
622 USHORT classification,
623 USHORT classification2)
624{
625 PP_StateClassificationFlags result = 0;
626
627 if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
628 result |= PP_StateClassificationFlag_Boot;
629
630 if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
631 result |= PP_StateClassificationFlag_Thermal;
632
633 if (classification &
634 ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
635 result |= PP_StateClassificationFlag_LimitedPowerSource;
636
637 if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
638 result |= PP_StateClassificationFlag_Rest;
639
640 if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
641 result |= PP_StateClassificationFlag_Forced;
642
643 if (classification & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
644 result |= PP_StateClassificationFlag_3DPerformance;
645
646
647 if (classification & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
648 result |= PP_StateClassificationFlag_ACOverdriveTemplate;
649
650 if (classification & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
651 result |= PP_StateClassificationFlag_Uvd;
652
653 if (classification & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
654 result |= PP_StateClassificationFlag_UvdHD;
655
656 if (classification & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
657 result |= PP_StateClassificationFlag_UvdSD;
658
659 if (classification & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
660 result |= PP_StateClassificationFlag_HD2;
661
662 if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
663 result |= PP_StateClassificationFlag_ACPI;
664
665 if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
666 result |= PP_StateClassificationFlag_LimitedPowerSource_2;
667
668
669 if (classification2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
670 result |= PP_StateClassificationFlag_ULV;
671
672 if (classification2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
673 result |= PP_StateClassificationFlag_UvdMVC;
674
675 return result;
676}
677
678static int init_non_clock_fields(struct pp_hwmgr *hwmgr,
679 struct pp_power_state *ps,
680 uint8_t version,
681 const ATOM_PPLIB_NONCLOCK_INFO *pnon_clock_info) {
682 unsigned long rrr_index;
683 unsigned long tmp;
684
685 ps->classification.ui_label = (le16_to_cpu(pnon_clock_info->usClassification) &
686 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
687 ps->classification.flags = make_classification_flags(hwmgr,
688 le16_to_cpu(pnon_clock_info->usClassification),
689 le16_to_cpu(pnon_clock_info->usClassification2));
690
691 ps->classification.temporary_state = false;
692 ps->classification.to_be_deleted = false;
693 tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
694 ATOM_PPLIB_SINGLE_DISPLAY_ONLY;
695
696 ps->validation.singleDisplayOnly = (0 != tmp);
697
698 tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
699 ATOM_PPLIB_DISALLOW_ON_DC;
700
701 ps->validation.disallowOnDC = (0 != tmp);
702
703 ps->pcie.lanes = ((le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
704 ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
705 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
706
707 ps->pcie.lanes = 0;
708
709 ps->display.disableFrameModulation = false;
710
711 rrr_index = (le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
712 ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK) >>
713 ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT;
714
715 if (rrr_index != ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED) {
716 static const uint8_t look_up[(ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK >> ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT) + 1] = \
717 { 0, 50, 0 };
718
719 ps->display.refreshrateSource = PP_RefreshrateSource_Explicit;
720 ps->display.explicitRefreshrate = look_up[rrr_index];
721 ps->display.limitRefreshrate = true;
722
723 if (ps->display.explicitRefreshrate == 0)
724 ps->display.limitRefreshrate = false;
725 } else
726 ps->display.limitRefreshrate = false;
727
728 tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
729 ATOM_PPLIB_ENABLE_VARIBRIGHT;
730
731 ps->display.enableVariBright = (0 != tmp);
732
733 tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
734 ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF;
735
736 ps->memory.dllOff = (0 != tmp);
737
738 ps->memory.m3arb = (le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
739 ATOM_PPLIB_M3ARB_MASK) >> ATOM_PPLIB_M3ARB_SHIFT;
740
741 ps->temperatures.min = PP_TEMPERATURE_UNITS_PER_CENTIGRADES *
742 pnon_clock_info->ucMinTemperature;
743
744 ps->temperatures.max = PP_TEMPERATURE_UNITS_PER_CENTIGRADES *
745 pnon_clock_info->ucMaxTemperature;
746
747 tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
748 ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING;
749
750 ps->software.disableLoadBalancing = tmp;
751
752 tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) &
753 ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS;
754
755 ps->software.enableSleepForTimestamps = (0 != tmp);
756
757 ps->validation.supportedPowerLevels = pnon_clock_info->ucRequiredPower;
758
759 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < version) {
760 ps->uvd_clocks.VCLK = pnon_clock_info->ulVCLK;
761 ps->uvd_clocks.DCLK = pnon_clock_info->ulDCLK;
762 } else {
763 ps->uvd_clocks.VCLK = 0;
764 ps->uvd_clocks.DCLK = 0;
765 }
766
767 return 0;
768}
769
770static ULONG size_of_entry_v2(ULONG num_dpm_levels)
771{
772 return (sizeof(UCHAR) + sizeof(UCHAR) +
773 (num_dpm_levels * sizeof(UCHAR)));
774}
775
776static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
777 const StateArray * pstate_arrays,
778 ULONG entry_index)
779{
780 ULONG i;
781 const ATOM_PPLIB_STATE_V2 *pstate;
782
783 pstate = pstate_arrays->states;
784 if (entry_index <= pstate_arrays->ucNumEntries) {
785 for (i = 0; i < entry_index; i++)
786 pstate = (ATOM_PPLIB_STATE_V2 *)(
787 (unsigned long)pstate +
788 size_of_entry_v2(pstate->ucNumDPMLevels));
789 }
790 return pstate;
791}
792
793
794static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
795 struct pp_hwmgr *hwmgr)
796{
797 const void *table_addr = NULL;
798 uint8_t frev, crev;
799 uint16_t size;
800
801 table_addr = cgs_atom_get_data_table(hwmgr->device,
802 GetIndexIntoMasterTable(DATA, PowerPlayInfo),
803 &size, &frev, &crev);
804
805 hwmgr->soft_pp_table = table_addr;
806
807 return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
808}
809
810
811int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
812 unsigned long *num_of_entries)
813{
814 const StateArray *pstate_arrays;
815 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table = get_powerplay_table(hwmgr);
816
817 if (powerplay_table == NULL)
818 return -1;
819
820 if (powerplay_table->sHeader.ucTableFormatRevision >= 6) {
821 pstate_arrays = (StateArray *)(((unsigned long)powerplay_table) +
822 le16_to_cpu(powerplay_table->usStateArrayOffset));
823
824 *num_of_entries = (unsigned long)(pstate_arrays->ucNumEntries);
825 } else
826 *num_of_entries = (unsigned long)(powerplay_table->ucNumStates);
827
828 return 0;
829}
830
831int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
832 unsigned long entry_index,
833 struct pp_power_state *ps,
834 pp_tables_hw_clock_info_callback func)
835{
836 int i;
837 const StateArray *pstate_arrays;
838 const ATOM_PPLIB_STATE_V2 *pstate_entry_v2;
839 const ATOM_PPLIB_NONCLOCK_INFO *pnon_clock_info;
840 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table = get_powerplay_table(hwmgr);
841 int result = 0;
842 int res = 0;
843
844 const ClockInfoArray *pclock_arrays;
845
846 const NonClockInfoArray *pnon_clock_arrays;
847
848 const ATOM_PPLIB_STATE *pstate_entry;
849
850 if (powerplay_table == NULL)
851 return -1;
852
853 ps->classification.bios_index = entry_index;
854
855 if (powerplay_table->sHeader.ucTableFormatRevision >= 6) {
856 pstate_arrays = (StateArray *)(((unsigned long)powerplay_table) +
857 le16_to_cpu(powerplay_table->usStateArrayOffset));
858
859 if (entry_index > pstate_arrays->ucNumEntries)
860 return -1;
861
862 pstate_entry_v2 = get_state_entry_v2(pstate_arrays, entry_index);
863 pclock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) +
864 le16_to_cpu(powerplay_table->usClockInfoArrayOffset));
865
866 pnon_clock_arrays = (NonClockInfoArray *)(((unsigned long)powerplay_table) +
867 le16_to_cpu(powerplay_table->usNonClockInfoArrayOffset));
868
869 pnon_clock_info = (ATOM_PPLIB_NONCLOCK_INFO *)((unsigned long)(pnon_clock_arrays->nonClockInfo) +
870 (pstate_entry_v2->nonClockInfoIndex * pnon_clock_arrays->ucEntrySize));
871
872 result = init_non_clock_fields(hwmgr, ps, pnon_clock_arrays->ucEntrySize, pnon_clock_info);
873
874 for (i = 0; i < pstate_entry_v2->ucNumDPMLevels; i++) {
875 const void *pclock_info = (const void *)(
876 (unsigned long)(pclock_arrays->clockInfo) +
877 (pstate_entry_v2->clockInfoIndex[i] * pclock_arrays->ucEntrySize));
878 res = func(hwmgr, &ps->hardware, i, pclock_info);
879 if ((0 == result) && (0 != res))
880 result = res;
881 }
882 } else {
883 if (entry_index > powerplay_table->ucNumStates)
884 return -1;
885
886 pstate_entry = (ATOM_PPLIB_STATE *)((unsigned long)powerplay_table + powerplay_table->usStateArrayOffset +
887 entry_index * powerplay_table->ucStateEntrySize);
888
889 pnon_clock_info = (ATOM_PPLIB_NONCLOCK_INFO *)((unsigned long)powerplay_table +
890 le16_to_cpu(powerplay_table->usNonClockInfoArrayOffset) +
891 pstate_entry->ucNonClockStateIndex *
892 powerplay_table->ucNonClockSize);
893
894 result = init_non_clock_fields(hwmgr, ps,
895 powerplay_table->ucNonClockSize,
896 pnon_clock_info);
897
898 for (i = 0; i < powerplay_table->ucStateEntrySize-1; i++) {
899 const void *pclock_info = (const void *)((unsigned long)powerplay_table +
900 le16_to_cpu(powerplay_table->usClockInfoArrayOffset) +
901 pstate_entry->ucClockStateIndices[i] *
902 powerplay_table->ucClockInfoSize);
903
904 int res = func(hwmgr, &ps->hardware, i, pclock_info);
905
906 if ((0 == result) && (0 != res))
907 result = res;
908 }
909 }
910
911 if ((0 == result) &&
912 (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot)))
913 result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware));
914
915 return result;
916}
917
918
919
/*
 * Placeholder for powerplay-table-wide initialization.  Intentionally a
 * no-op in this parser; always returns 0 so the init sequence proceeds.
 */
static int init_powerplay_tables(
			struct pp_hwmgr *hwmgr,
			const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table
)
{
	return 0;
}
927
928
/*
 * Placeholder for thermal-controller setup from the powerplay table.
 * Intentionally a no-op in this parser; always returns 0.
 */
static int init_thermal_controller(
			struct pp_hwmgr *hwmgr,
			const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	return 0;
}
935
936static int init_overdrive_limits_V1_4(struct pp_hwmgr *hwmgr,
937 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table,
938 const ATOM_FIRMWARE_INFO_V1_4 *fw_info)
939{
940 hwmgr->platform_descriptor.overdriveLimit.engineClock =
941 le32_to_cpu(fw_info->ulASICMaxEngineClock);
942
943 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
944 le32_to_cpu(fw_info->ulASICMaxMemoryClock);
945
946 hwmgr->platform_descriptor.maxOverdriveVDDC =
947 le32_to_cpu(fw_info->ul3DAccelerationEngineClock) & 0x7FF;
948
949 hwmgr->platform_descriptor.minOverdriveVDDC =
950 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
951
952 hwmgr->platform_descriptor.maxOverdriveVDDC =
953 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
954
955 hwmgr->platform_descriptor.overdriveVDDCStep = 0;
956 return 0;
957}
958
959static int init_overdrive_limits_V2_1(struct pp_hwmgr *hwmgr,
960 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table,
961 const ATOM_FIRMWARE_INFO_V2_1 *fw_info)
962{
963 const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3;
964 const ATOM_PPLIB_EXTENDEDHEADER *header;
965
966 if (le16_to_cpu(powerplay_table->usTableSize) <
967 sizeof(ATOM_PPLIB_POWERPLAYTABLE3))
968 return 0;
969
970 powerplay_table3 = (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
971
972 if (0 == powerplay_table3->usExtendendedHeaderOffset)
973 return 0;
974
975 header = (ATOM_PPLIB_EXTENDEDHEADER *)(((unsigned long) powerplay_table) +
976 le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset));
977
978 hwmgr->platform_descriptor.overdriveLimit.engineClock = le32_to_cpu(header->ulMaxEngineClock);
979 hwmgr->platform_descriptor.overdriveLimit.memoryClock = le32_to_cpu(header->ulMaxMemoryClock);
980
981
982 hwmgr->platform_descriptor.minOverdriveVDDC = 0;
983 hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
984 hwmgr->platform_descriptor.overdriveVDDCStep = 0;
985
986 return 0;
987}
988
989static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
990 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
991{
992 int result;
993 uint8_t frev, crev;
994 uint16_t size;
995
996 const ATOM_COMMON_TABLE_HEADER *fw_info = NULL;
997
998 hwmgr->platform_descriptor.overdriveLimit.engineClock = 0;
999 hwmgr->platform_descriptor.overdriveLimit.memoryClock = 0;
1000 hwmgr->platform_descriptor.minOverdriveVDDC = 0;
1001 hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
1002
1003 /* We assume here that fw_info is unchanged if this call fails.*/
1004 fw_info = cgs_atom_get_data_table(hwmgr->device,
1005 GetIndexIntoMasterTable(DATA, FirmwareInfo),
1006 &size, &frev, &crev);
1007
1008 if ((fw_info->ucTableFormatRevision == 1)
1009 && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V1_4)))
1010 result = init_overdrive_limits_V1_4(hwmgr,
1011 powerplay_table,
1012 (const ATOM_FIRMWARE_INFO_V1_4 *)fw_info);
1013
1014 else if ((fw_info->ucTableFormatRevision == 2)
1015 && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V2_1)))
1016 result = init_overdrive_limits_V2_1(hwmgr,
1017 powerplay_table,
1018 (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info);
1019
1020 if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
1021 && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0
1022 && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1023 PHM_PlatformCaps_OverdriveDisabledByPowerBudget))
1024 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1025 PHM_PlatformCaps_ACOverdriveSupport);
1026
1027 return result;
1028}
1029
1030static int get_uvd_clock_voltage_limit_table(struct pp_hwmgr *hwmgr,
1031 struct phm_uvd_clock_voltage_dependency_table **ptable,
1032 const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *table,
1033 const UVDClockInfoArray *array)
1034{
1035 unsigned long table_size, i;
1036 struct phm_uvd_clock_voltage_dependency_table *uvd_table;
1037
1038 table_size = sizeof(unsigned long) +
1039 sizeof(struct phm_uvd_clock_voltage_dependency_table) *
1040 table->numEntries;
1041
1042 uvd_table = kzalloc(table_size, GFP_KERNEL);
1043 if (NULL == uvd_table)
1044 return -ENOMEM;
1045
1046 uvd_table->count = table->numEntries;
1047
1048 for (i = 0; i < table->numEntries; i++) {
1049 const UVDClockInfo *entry =
1050 &array->entries[table->entries[i].ucUVDClockInfoIndex];
1051 uvd_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
1052 uvd_table->entries[i].vclk = ((unsigned long)entry->ucVClkHigh << 16)
1053 | le16_to_cpu(entry->usVClkLow);
1054 uvd_table->entries[i].dclk = ((unsigned long)entry->ucDClkHigh << 16)
1055 | le16_to_cpu(entry->usDClkLow);
1056 }
1057
1058 *ptable = uvd_table;
1059
1060 return 0;
1061}
1062
1063static int get_vce_clock_voltage_limit_table(struct pp_hwmgr *hwmgr,
1064 struct phm_vce_clock_voltage_dependency_table **ptable,
1065 const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table,
1066 const VCEClockInfoArray *array)
1067{
1068 unsigned long table_size, i;
1069 struct phm_vce_clock_voltage_dependency_table *vce_table = NULL;
1070
1071 table_size = sizeof(unsigned long) +
1072 sizeof(struct phm_vce_clock_voltage_dependency_table)
1073 * table->numEntries;
1074
1075 vce_table = kzalloc(table_size, GFP_KERNEL);
1076 if (NULL == vce_table)
1077 return -ENOMEM;
1078
1079 vce_table->count = table->numEntries;
1080 for (i = 0; i < table->numEntries; i++) {
1081 const VCEClockInfo *entry = &array->entries[table->entries[i].ucVCEClockInfoIndex];
1082
1083 vce_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
1084 vce_table->entries[i].evclk = ((unsigned long)entry->ucEVClkHigh << 16)
1085 | le16_to_cpu(entry->usEVClkLow);
1086 vce_table->entries[i].ecclk = ((unsigned long)entry->ucECClkHigh << 16)
1087 | le16_to_cpu(entry->usECClkLow);
1088 }
1089
1090 *ptable = vce_table;
1091
1092 return 0;
1093}
1094
1095static int get_samu_clock_voltage_limit_table(struct pp_hwmgr *hwmgr,
1096 struct phm_samu_clock_voltage_dependency_table **ptable,
1097 const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *table)
1098{
1099 unsigned long table_size, i;
1100 struct phm_samu_clock_voltage_dependency_table *samu_table;
1101
1102 table_size = sizeof(unsigned long) +
1103 sizeof(struct phm_samu_clock_voltage_dependency_table) *
1104 table->numEntries;
1105
1106 samu_table = kzalloc(table_size, GFP_KERNEL);
1107 if (NULL == samu_table)
1108 return -ENOMEM;
1109
1110 samu_table->count = table->numEntries;
1111
1112 for (i = 0; i < table->numEntries; i++) {
1113 samu_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
1114 samu_table->entries[i].samclk = ((unsigned long)table->entries[i].ucSAMClockHigh << 16)
1115 | le16_to_cpu(table->entries[i].usSAMClockLow);
1116 }
1117
1118 *ptable = samu_table;
1119
1120 return 0;
1121}
1122
1123static int get_acp_clock_voltage_limit_table(struct pp_hwmgr *hwmgr,
1124 struct phm_acp_clock_voltage_dependency_table **ptable,
1125 const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *table)
1126{
1127 unsigned table_size, i;
1128 struct phm_acp_clock_voltage_dependency_table *acp_table;
1129
1130 table_size = sizeof(unsigned long) +
1131 sizeof(struct phm_acp_clock_voltage_dependency_table) *
1132 table->numEntries;
1133
1134 acp_table = kzalloc(table_size, GFP_KERNEL);
1135 if (NULL == acp_table)
1136 return -ENOMEM;
1137
1138 acp_table->count = (unsigned long)table->numEntries;
1139
1140 for (i = 0; i < table->numEntries; i++) {
1141 acp_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
1142 acp_table->entries[i].acpclk = ((unsigned long)table->entries[i].ucACPClockHigh << 16)
1143 | le16_to_cpu(table->entries[i].usACPClockLow);
1144 }
1145
1146 *ptable = acp_table;
1147
1148 return 0;
1149}
1150
1151static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr,
1152 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
1153{
1154 ATOM_PPLIB_Clock_Voltage_Dependency_Table *table;
1155 ATOM_PPLIB_Clock_Voltage_Limit_Table *limit_table;
1156 int result = 0;
1157
1158 uint16_t vce_clock_info_array_offset;
1159 uint16_t uvd_clock_info_array_offset;
1160 uint16_t table_offset;
1161
1162 hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
1163 hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
1164 hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
1165 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
1166 hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
1167 hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
1168 hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
1169 hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
1170 hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
1171 hwmgr->dyn_state.ppm_parameter_table = NULL;
1172 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
1173
1174 vce_clock_info_array_offset = get_vce_clock_info_array_offset(
1175 hwmgr, powerplay_table);
1176 table_offset = get_vce_clock_voltage_limit_table_offset(hwmgr,
1177 powerplay_table);
1178 if (vce_clock_info_array_offset > 0 && table_offset > 0) {
1179 const VCEClockInfoArray *array = (const VCEClockInfoArray *)
1180 (((unsigned long) powerplay_table) +
1181 vce_clock_info_array_offset);
1182 const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table =
1183 (const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
1184 (((unsigned long) powerplay_table) + table_offset);
1185 result = get_vce_clock_voltage_limit_table(hwmgr,
1186 &hwmgr->dyn_state.vce_clock_voltage_dependency_table,
1187 table, array);
1188 }
1189
1190 uvd_clock_info_array_offset = get_uvd_clock_info_array_offset(hwmgr, powerplay_table);
1191 table_offset = get_uvd_clock_voltage_limit_table_offset(hwmgr, powerplay_table);
1192
1193 if (uvd_clock_info_array_offset > 0 && table_offset > 0) {
1194 const UVDClockInfoArray *array = (const UVDClockInfoArray *)
1195 (((unsigned long) powerplay_table) +
1196 uvd_clock_info_array_offset);
1197 const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *ptable =
1198 (const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
1199 (((unsigned long) powerplay_table) + table_offset);
1200 result = get_uvd_clock_voltage_limit_table(hwmgr,
1201 &hwmgr->dyn_state.uvd_clock_voltage_dependency_table, ptable, array);
1202 }
1203
1204 table_offset = get_samu_clock_voltage_limit_table_offset(hwmgr,
1205 powerplay_table);
1206
1207 if (table_offset > 0) {
1208 const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *ptable =
1209 (const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
1210 (((unsigned long) powerplay_table) + table_offset);
1211 result = get_samu_clock_voltage_limit_table(hwmgr,
1212 &hwmgr->dyn_state.samu_clock_voltage_dependency_table, ptable);
1213 }
1214
1215 table_offset = get_acp_clock_voltage_limit_table_offset(hwmgr,
1216 powerplay_table);
1217
1218 if (table_offset > 0) {
1219 const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *ptable =
1220 (const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
1221 (((unsigned long) powerplay_table) + table_offset);
1222 result = get_acp_clock_voltage_limit_table(hwmgr,
1223 &hwmgr->dyn_state.acp_clock_voltage_dependency_table, ptable);
1224 }
1225
1226 table_offset = get_cacp_tdp_table_offset(hwmgr, powerplay_table);
1227 if (table_offset > 0) {
1228 UCHAR rev_id = *(UCHAR *)(((unsigned long)powerplay_table) + table_offset);
1229
1230 if (rev_id > 0) {
1231 const ATOM_PPLIB_POWERTUNE_Table_V1 *tune_table =
1232 (const ATOM_PPLIB_POWERTUNE_Table_V1 *)
1233 (((unsigned long) powerplay_table) + table_offset);
1234 result = get_cac_tdp_table(hwmgr, &hwmgr->dyn_state.cac_dtp_table,
1235 &tune_table->power_tune_table,
1236 le16_to_cpu(tune_table->usMaximumPowerDeliveryLimit));
1237 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
1238 le16_to_cpu(tune_table->usTjMax);
1239 } else {
1240 const ATOM_PPLIB_POWERTUNE_Table *tune_table =
1241 (const ATOM_PPLIB_POWERTUNE_Table *)
1242 (((unsigned long) powerplay_table) + table_offset);
1243 result = get_cac_tdp_table(hwmgr,
1244 &hwmgr->dyn_state.cac_dtp_table,
1245 &tune_table->power_tune_table, 255);
1246 }
1247 }
1248
1249 if (le16_to_cpu(powerplay_table->usTableSize) >=
1250 sizeof(ATOM_PPLIB_POWERPLAYTABLE4)) {
1251 const ATOM_PPLIB_POWERPLAYTABLE4 *powerplay_table4 =
1252 (const ATOM_PPLIB_POWERPLAYTABLE4 *)powerplay_table;
1253 if (0 != powerplay_table4->usVddcDependencyOnSCLKOffset) {
1254 table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
1255 (((unsigned long) powerplay_table4) +
1256 powerplay_table4->usVddcDependencyOnSCLKOffset);
1257 result = get_clock_voltage_dependency_table(hwmgr,
1258 &hwmgr->dyn_state.vddc_dependency_on_sclk, table);
1259 }
1260
1261 if (result == 0 && (0 != powerplay_table4->usVddciDependencyOnMCLKOffset)) {
1262 table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
1263 (((unsigned long) powerplay_table4) +
1264 powerplay_table4->usVddciDependencyOnMCLKOffset);
1265 result = get_clock_voltage_dependency_table(hwmgr,
1266 &hwmgr->dyn_state.vddci_dependency_on_mclk, table);
1267 }
1268
1269 if (result == 0 && (0 != powerplay_table4->usVddcDependencyOnMCLKOffset)) {
1270 table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
1271 (((unsigned long) powerplay_table4) +
1272 powerplay_table4->usVddcDependencyOnMCLKOffset);
1273 result = get_clock_voltage_dependency_table(hwmgr,
1274 &hwmgr->dyn_state.vddc_dependency_on_mclk, table);
1275 }
1276
1277 if (result == 0 && (0 != powerplay_table4->usMaxClockVoltageOnDCOffset)) {
1278 limit_table = (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
1279 (((unsigned long) powerplay_table4) +
1280 powerplay_table4->usMaxClockVoltageOnDCOffset);
1281 result = get_clock_voltage_limit(hwmgr,
1282 &hwmgr->dyn_state.max_clock_voltage_on_dc, limit_table);
1283 }
1284
1285 if (result == 0 && (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) &&
1286 (0 != hwmgr->dyn_state.vddc_dependency_on_mclk->count))
1287 result = get_valid_clk(hwmgr, &hwmgr->dyn_state.valid_mclk_values,
1288 hwmgr->dyn_state.vddc_dependency_on_mclk);
1289
1290 if(result == 0 && (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) &&
1291 (0 != hwmgr->dyn_state.vddc_dependency_on_sclk->count))
1292 result = get_valid_clk(hwmgr,
1293 &hwmgr->dyn_state.valid_sclk_values,
1294 hwmgr->dyn_state.vddc_dependency_on_sclk);
1295
1296 if (result == 0 && (0 != powerplay_table4->usMvddDependencyOnMCLKOffset)) {
1297 table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
1298 (((unsigned long) powerplay_table4) +
1299 powerplay_table4->usMvddDependencyOnMCLKOffset);
1300 result = get_clock_voltage_dependency_table(hwmgr,
1301 &hwmgr->dyn_state.mvdd_dependency_on_mclk, table);
1302 }
1303 }
1304
1305 table_offset = get_sclk_vdd_gfx_clock_voltage_dependency_table_offset(hwmgr,
1306 powerplay_table);
1307
1308 if (table_offset > 0) {
1309 table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
1310 (((unsigned long) powerplay_table) + table_offset);
1311 result = get_clock_voltage_dependency_table(hwmgr,
1312 &hwmgr->dyn_state.vdd_gfx_dependency_on_sclk, table);
1313 }
1314
1315 return result;
1316}
1317
1318static int get_cac_leakage_table(struct pp_hwmgr *hwmgr,
1319 struct phm_cac_leakage_table **ptable,
1320 const ATOM_PPLIB_CAC_Leakage_Table *table)
1321{
1322 struct phm_cac_leakage_table *cac_leakage_table;
1323 unsigned long table_size, i;
1324
1325 if (hwmgr == NULL || table == NULL || ptable == NULL)
1326 return -EINVAL;
1327
1328 table_size = sizeof(ULONG) +
1329 (sizeof(struct phm_cac_leakage_table) * table->ucNumEntries);
1330
1331 cac_leakage_table = kzalloc(table_size, GFP_KERNEL);
1332
1333 if (cac_leakage_table == NULL)
1334 return -ENOMEM;
1335
1336 cac_leakage_table->count = (ULONG)table->ucNumEntries;
1337
1338 for (i = 0; i < cac_leakage_table->count; i++) {
1339 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1340 PHM_PlatformCaps_EVV)) {
1341 cac_leakage_table->entries[i].Vddc1 = le16_to_cpu(table->entries[i].usVddc1);
1342 cac_leakage_table->entries[i].Vddc2 = le16_to_cpu(table->entries[i].usVddc2);
1343 cac_leakage_table->entries[i].Vddc3 = le16_to_cpu(table->entries[i].usVddc3);
1344 } else {
1345 cac_leakage_table->entries[i].Vddc = le16_to_cpu(table->entries[i].usVddc);
1346 cac_leakage_table->entries[i].Leakage = le32_to_cpu(table->entries[i].ulLeakageValue);
1347 }
1348 }
1349
1350 *ptable = cac_leakage_table;
1351
1352 return 0;
1353}
1354
1355static int get_platform_power_management_table(struct pp_hwmgr *hwmgr,
1356 ATOM_PPLIB_PPM_Table *atom_ppm_table)
1357{
1358 struct phm_ppm_table *ptr = kzalloc(sizeof(struct phm_ppm_table), GFP_KERNEL);
1359
1360 if (NULL == ptr)
1361 return -ENOMEM;
1362
1363 ptr->ppm_design = atom_ppm_table->ucPpmDesign;
1364 ptr->cpu_core_number = le16_to_cpu(atom_ppm_table->usCpuCoreNumber);
1365 ptr->platform_tdp = le32_to_cpu(atom_ppm_table->ulPlatformTDP);
1366 ptr->small_ac_platform_tdp = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDP);
1367 ptr->platform_tdc = le32_to_cpu(atom_ppm_table->ulPlatformTDC);
1368 ptr->small_ac_platform_tdc = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDC);
1369 ptr->apu_tdp = le32_to_cpu(atom_ppm_table->ulApuTDP);
1370 ptr->dgpu_tdp = le32_to_cpu(atom_ppm_table->ulDGpuTDP);
1371 ptr->dgpu_ulv_power = le32_to_cpu(atom_ppm_table->ulDGpuUlvPower);
1372 ptr->tj_max = le32_to_cpu(atom_ppm_table->ulTjmax);
1373 hwmgr->dyn_state.ppm_parameter_table = ptr;
1374
1375 return 0;
1376}
1377
/*
 * Read the DPM2 parameters from a revision-5 powerplay table: TDP
 * limits, VID defaults, SQ ramping threshold, CAC leakage (table and
 * scalar), load-line slope, and — via the extended header — the PPM
 * table.  Tables older than revision 5 are left untouched and 0 is
 * returned.  Returns 0 or an error from a sub-parser.
 */
static int init_dpm2_parameters(struct pp_hwmgr *hwmgr,
			const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	int result = 0;

	if (le16_to_cpu(powerplay_table->usTableSize) >=
		sizeof(ATOM_PPLIB_POWERPLAYTABLE5)) {
		const ATOM_PPLIB_POWERPLAYTABLE5 *ptable5 =
			(const ATOM_PPLIB_POWERPLAYTABLE5 *)powerplay_table;
		/* Revision 5 embeds revision 4, which embeds revision 3;
		 * walk inward to reach the extended-header offset.
		 */
		const ATOM_PPLIB_POWERPLAYTABLE4 *ptable4 =
			(const ATOM_PPLIB_POWERPLAYTABLE4 *)
			(&ptable5->basicTable4);
		const ATOM_PPLIB_POWERPLAYTABLE3 *ptable3 =
			(const ATOM_PPLIB_POWERPLAYTABLE3 *)
			(&ptable4->basicTable3);
		const ATOM_PPLIB_EXTENDEDHEADER *extended_header;
		uint16_t table_offset;
		ATOM_PPLIB_PPM_Table *atom_ppm_table;

		hwmgr->platform_descriptor.TDPLimit = le32_to_cpu(ptable5->ulTDPLimit);
		hwmgr->platform_descriptor.nearTDPLimit = le32_to_cpu(ptable5->ulNearTDPLimit);

		hwmgr->platform_descriptor.TDPODLimit = le16_to_cpu(ptable5->usTDPODLimit);
		hwmgr->platform_descriptor.TDPAdjustment = 0;

		/* Fixed VID defaults; limits are in microvolt-scale units —
		 * presumably 1.5 V max with 6.25 mV steps; confirm against
		 * the consumers of these fields.
		 */
		hwmgr->platform_descriptor.VidAdjustment = 0;
		hwmgr->platform_descriptor.VidAdjustmentPolarity = 0;
		hwmgr->platform_descriptor.VidMinLimit = 0;
		hwmgr->platform_descriptor.VidMaxLimit = 1500000;
		hwmgr->platform_descriptor.VidStep = 6250;

		hwmgr->platform_descriptor.nearTDPLimitAdjusted = le32_to_cpu(ptable5->ulNearTDPLimit);

		/* A non-zero overdrive TDP limit means power control works. */
		if (hwmgr->platform_descriptor.TDPODLimit != 0)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerControl);

		hwmgr->platform_descriptor.SQRampingThreshold = le32_to_cpu(ptable5->ulSQRampingThreshold);

		hwmgr->platform_descriptor.CACLeakage = le32_to_cpu(ptable5->ulCACLeakage);

		hwmgr->dyn_state.cac_leakage_table = NULL;

		if (0 != ptable5->usCACLeakageTableOffset) {
			const ATOM_PPLIB_CAC_Leakage_Table *pCAC_leakage_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)(((unsigned long)ptable5) +
				le16_to_cpu(ptable5->usCACLeakageTableOffset));
			result = get_cac_leakage_table(hwmgr,
				&hwmgr->dyn_state.cac_leakage_table, pCAC_leakage_table);
		}

		hwmgr->platform_descriptor.LoadLineSlope = le16_to_cpu(ptable5->usLoadLineSlope);

		hwmgr->dyn_state.ppm_parameter_table = NULL;

		/* The PPM table hangs off the extended header and only
		 * exists from extended-header size V5 onward.
		 */
		if (0 != ptable3->usExtendendedHeaderOffset) {
			extended_header = (const ATOM_PPLIB_EXTENDEDHEADER *)
				(((unsigned long)powerplay_table) +
				le16_to_cpu(ptable3->usExtendendedHeaderOffset));
			if ((extended_header->usPPMTableOffset > 0) &&
				le16_to_cpu(extended_header->usSize) >=
				SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) {
				table_offset = le16_to_cpu(extended_header->usPPMTableOffset);
				atom_ppm_table = (ATOM_PPLIB_PPM_Table *)
					(((unsigned long)powerplay_table) + table_offset);
				if (0 == get_platform_power_management_table(hwmgr, atom_ppm_table))
					phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EnablePlatformPowerManagement);
			}
		}
	}
	return result;
}
1451
/*
 * init_phase_shedding_table - copy the VDDC phase-shedding limits out of
 * a V4 (or newer) BIOS PowerPlay table into hwmgr->dyn_state.
 *
 * Returns 0 on success (including when the BIOS provides no such table)
 * or -ENOMEM if the destination table cannot be allocated.  The table is
 * freed again in pp_tables_uninitialize().
 */
static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
		const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
	/* Phase-shedding limits only exist from table revision 4 onwards. */
	if (le16_to_cpu(powerplay_table->usTableSize) >=
		sizeof(ATOM_PPLIB_POWERPLAYTABLE4)) {
		const ATOM_PPLIB_POWERPLAYTABLE4 *powerplay_table4 =
				(const ATOM_PPLIB_POWERPLAYTABLE4 *)powerplay_table;

		/* A zero offset means the BIOS does not provide the table. */
		if (0 != powerplay_table4->usVddcPhaseShedLimitsTableOffset) {
			const ATOM_PPLIB_PhaseSheddingLimits_Table *ptable =
					(ATOM_PPLIB_PhaseSheddingLimits_Table *)
					(((unsigned long)powerplay_table4) +
					le16_to_cpu(powerplay_table4->usVddcPhaseShedLimitsTableOffset));
			struct phm_phase_shedding_limits_table *table;
			unsigned long size, i;


			/* NOTE(review): the per-entry term below uses
			 * sizeof(struct phm_phase_shedding_limits_table);
			 * presumably the per-record struct was intended.
			 * This over-allocates but is otherwise harmless —
			 * TODO confirm against the struct definitions. */
			size = sizeof(unsigned long) +
				(sizeof(struct phm_phase_shedding_limits_table) *
				ptable->ucNumEntries);

			table = kzalloc(size, GFP_KERNEL);

			if (table == NULL)
				return -ENOMEM;

			table->count = (unsigned long)ptable->ucNumEntries;

			/* Clocks are stored split in the BIOS table: a high
			 * byte plus a little-endian low word; stitch them
			 * back into a single value. */
			for (i = 0; i < table->count; i++) {
				table->entries[i].Voltage = (unsigned long)le16_to_cpu(ptable->entries[i].usVoltage);
				table->entries[i].Sclk = ((unsigned long)ptable->entries[i].ucSclkHigh << 16)
						| le16_to_cpu(ptable->entries[i].usSclkLow);
				table->entries[i].Mclk = ((unsigned long)ptable->entries[i].ucMclkHigh << 16)
						| le16_to_cpu(ptable->entries[i].usMclkLow);
			}
			hwmgr->dyn_state.vddc_phase_shed_limits_table = table;
		}
	}

	return 0;
}
1493
1494int get_number_of_vce_state_table_entries(
1495 struct pp_hwmgr *hwmgr)
1496{
1497 const ATOM_PPLIB_POWERPLAYTABLE *table =
1498 get_powerplay_table(hwmgr);
1499 const ATOM_PPLIB_VCE_State_Table *vce_table =
1500 get_vce_state_table(hwmgr, table);
1501
1502 if (vce_table > 0)
1503 return vce_table->numEntries;
1504
1505 return 0;
1506}
1507
1508int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
1509 unsigned long i,
1510 struct PP_VCEState *vce_state,
1511 void **clock_info,
1512 unsigned long *flag)
1513{
1514 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table = get_powerplay_table(hwmgr);
1515
1516 const ATOM_PPLIB_VCE_State_Table *vce_state_table = get_vce_state_table(hwmgr, powerplay_table);
1517
1518 unsigned short vce_clock_info_array_offset = get_vce_clock_info_array_offset(hwmgr, powerplay_table);
1519
1520 const VCEClockInfoArray *vce_clock_info_array = (const VCEClockInfoArray *)(((unsigned long) powerplay_table) + vce_clock_info_array_offset);
1521
1522 const ClockInfoArray *clock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) + powerplay_table->usClockInfoArrayOffset);
1523
1524 const ATOM_PPLIB_VCE_State_Record *record = &vce_state_table->entries[i];
1525
1526 const VCEClockInfo *vce_clock_info = &vce_clock_info_array->entries[record->ucVCEClockInfoIndex];
1527
1528 unsigned long clockInfoIndex = record->ucClockInfoIndex & 0x3F;
1529
1530 *flag = (record->ucClockInfoIndex >> NUM_BITS_CLOCK_INFO_ARRAY_INDEX);
1531
1532 vce_state->evclk = ((uint32_t)vce_clock_info->ucEVClkHigh << 16) | vce_clock_info->usEVClkLow;
1533 vce_state->ecclk = ((uint32_t)vce_clock_info->ucECClkHigh << 16) | vce_clock_info->usECClkLow;
1534
1535 *clock_info = (void *)((unsigned long)(clock_arrays->clockInfo) + (clockInfoIndex * clock_arrays->ucEntrySize));
1536
1537 return 0;
1538}
1539
1540
/*
 * pp_tables_initialize - parse the BIOS PowerPlay table into hwmgr state.
 *
 * Runs each parsing stage in order and stops at the first failure;
 * PP_ASSERT_WITH_CODE logs the failing stage's name and returns the
 * stage's error code to the caller.
 */
static int pp_tables_initialize(struct pp_hwmgr *hwmgr)
{
	int result;
	const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table;

	/* Request a fresh upload of the table to the SMC. */
	hwmgr->need_pp_table_upload = true;

	powerplay_table = get_powerplay_table(hwmgr);

	result = init_powerplay_tables(hwmgr, powerplay_table);

	PP_ASSERT_WITH_CODE((result == 0),
		"init_powerplay_tables failed", return result);

	result = set_platform_caps(hwmgr,
		le32_to_cpu(powerplay_table->ulPlatformCaps));

	PP_ASSERT_WITH_CODE((result == 0),
		"set_platform_caps failed", return result);

	result = init_thermal_controller(hwmgr, powerplay_table);

	PP_ASSERT_WITH_CODE((result == 0),
		"init_thermal_controller failed", return result);

	result = init_overdrive_limits(hwmgr, powerplay_table);

	PP_ASSERT_WITH_CODE((result == 0),
		"init_overdrive_limits failed", return result);

	result = init_clock_voltage_dependency(hwmgr,
		powerplay_table);

	PP_ASSERT_WITH_CODE((result == 0),
		"init_clock_voltage_dependency failed", return result);

	result = init_dpm2_parameters(hwmgr, powerplay_table);

	PP_ASSERT_WITH_CODE((result == 0),
		"init_dpm2_parameters failed", return result);

	result = init_phase_shedding_table(hwmgr, powerplay_table);

	PP_ASSERT_WITH_CODE((result == 0),
		"init_phase_shedding_table failed", return result);

	return result;
}
1589
1590static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1591{
1592 if (NULL != hwmgr->soft_pp_table) {
1593 kfree(hwmgr->soft_pp_table);
1594 hwmgr->soft_pp_table = NULL;
1595 }
1596
1597 if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
1598 kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
1599 hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
1600 }
1601
1602 if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
1603 kfree(hwmgr->dyn_state.vddci_dependency_on_mclk);
1604 hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
1605 }
1606
1607 if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
1608 kfree(hwmgr->dyn_state.vddc_dependency_on_mclk);
1609 hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
1610 }
1611
1612 if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
1613 kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk);
1614 hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
1615 }
1616
1617 if (NULL != hwmgr->dyn_state.valid_mclk_values) {
1618 kfree(hwmgr->dyn_state.valid_mclk_values);
1619 hwmgr->dyn_state.valid_mclk_values = NULL;
1620 }
1621
1622 if (NULL != hwmgr->dyn_state.valid_sclk_values) {
1623 kfree(hwmgr->dyn_state.valid_sclk_values);
1624 hwmgr->dyn_state.valid_sclk_values = NULL;
1625 }
1626
1627 if (NULL != hwmgr->dyn_state.cac_leakage_table) {
1628 kfree(hwmgr->dyn_state.cac_leakage_table);
1629 hwmgr->dyn_state.cac_leakage_table = NULL;
1630 }
1631
1632 if (NULL != hwmgr->dyn_state.vddc_phase_shed_limits_table) {
1633 kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table);
1634 hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL;
1635 }
1636
1637 if (NULL != hwmgr->dyn_state.vce_clock_voltage_dependency_table) {
1638 kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table);
1639 hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
1640 }
1641
1642 if (NULL != hwmgr->dyn_state.uvd_clock_voltage_dependency_table) {
1643 kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
1644 hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
1645 }
1646
1647 if (NULL != hwmgr->dyn_state.samu_clock_voltage_dependency_table) {
1648 kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table);
1649 hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
1650 }
1651
1652 if (NULL != hwmgr->dyn_state.acp_clock_voltage_dependency_table) {
1653 kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table);
1654 hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
1655 }
1656
1657 if (NULL != hwmgr->dyn_state.cac_dtp_table) {
1658 kfree(hwmgr->dyn_state.cac_dtp_table);
1659 hwmgr->dyn_state.cac_dtp_table = NULL;
1660 }
1661
1662 if (NULL != hwmgr->dyn_state.ppm_parameter_table) {
1663 kfree(hwmgr->dyn_state.ppm_parameter_table);
1664 hwmgr->dyn_state.ppm_parameter_table = NULL;
1665 }
1666
1667 if (NULL != hwmgr->dyn_state.vdd_gfx_dependency_on_sclk) {
1668 kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
1669 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
1670 }
1671
1672 if (NULL != hwmgr->dyn_state.vq_budgeting_table) {
1673 kfree(hwmgr->dyn_state.vq_budgeting_table);
1674 hwmgr->dyn_state.vq_budgeting_table = NULL;
1675 }
1676
1677 return 0;
1678}
1679
/*
 * Dispatch table exported to the powerplay core for ASICs using the
 * legacy BIOS PowerPlay tables: table setup/teardown plus the VCE
 * state-table accessors defined above.
 */
const struct pp_table_func pptable_funcs = {
	.pptable_init = pp_tables_initialize,
	.pptable_fini = pp_tables_uninitialize,
	.pptable_get_number_of_vce_state_table_entries =
			get_number_of_vce_state_table_entries,
	.pptable_get_vce_state_table_entry =
			get_vce_state_table_entry,
};
1688
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
new file mode 100644
index 000000000000..30434802417e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 * Interface Functions related to the BIOS PowerPlay Tables.
22 *
23 */
24
25#ifndef PROCESSPPTABLES_H
26#define PROCESSPPTABLES_H
27
28struct pp_hwmgr;
29struct pp_power_state;
30struct pp_hw_power_state;
31
32extern const struct pp_table_func pptable_funcs;
33
34typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
35 struct pp_hw_power_state *hw_ps,
36 unsigned int index,
37 const void *clock_info);
38
39int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
40 unsigned long *num_of_entries);
41
42int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
43 unsigned long entry_index,
44 struct pp_power_state *ps,
45 pp_tables_hw_clock_info_callback func);
46
47#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
new file mode 100644
index 000000000000..e58d038a997b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
@@ -0,0 +1,350 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "tonga_clockpowergating.h"
26#include "tonga_ppsmc.h"
27#include "tonga_hwmgr.h"
28
29int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
30{
31 if (phm_cf_want_uvd_power_gating(hwmgr))
32 return smum_send_msg_to_smc(hwmgr->smumgr,
33 PPSMC_MSG_UVDPowerOFF);
34 return 0;
35}
36
37int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
38{
39 if (phm_cf_want_uvd_power_gating(hwmgr)) {
40 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
41 PHM_PlatformCaps_UVDDynamicPowerGating)) {
42 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
43 PPSMC_MSG_UVDPowerON, 1);
44 } else {
45 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
46 PPSMC_MSG_UVDPowerON, 0);
47 }
48 }
49
50 return 0;
51}
52
53int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
54{
55 if (phm_cf_want_vce_power_gating(hwmgr))
56 return smum_send_msg_to_smc(hwmgr->smumgr,
57 PPSMC_MSG_VCEPowerOFF);
58 return 0;
59}
60
61int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
62{
63 if (phm_cf_want_vce_power_gating(hwmgr))
64 return smum_send_msg_to_smc(hwmgr->smumgr,
65 PPSMC_MSG_VCEPowerON);
66 return 0;
67}
68
69int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
70{
71 int ret = 0;
72
73 switch (block) {
74 case PHM_AsicBlock_UVD_MVC:
75 case PHM_AsicBlock_UVD:
76 case PHM_AsicBlock_UVD_HD:
77 case PHM_AsicBlock_UVD_SD:
78 if (gating == PHM_ClockGateSetting_StaticOff)
79 ret = tonga_phm_powerdown_uvd(hwmgr);
80 else
81 ret = tonga_phm_powerup_uvd(hwmgr);
82 break;
83 case PHM_AsicBlock_GFX:
84 default:
85 break;
86 }
87
88 return ret;
89}
90
91int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
92{
93 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
94
95 data->uvd_power_gated = false;
96 data->vce_power_gated = false;
97
98 tonga_phm_powerup_uvd(hwmgr);
99 tonga_phm_powerup_vce(hwmgr);
100
101 return 0;
102}
103
104int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
105{
106 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
107
108 if (data->uvd_power_gated == bgate)
109 return 0;
110
111 data->uvd_power_gated = bgate;
112
113 if (bgate) {
114 cgs_set_clockgating_state(hwmgr->device,
115 AMD_IP_BLOCK_TYPE_UVD,
116 AMD_CG_STATE_UNGATE);
117 cgs_set_powergating_state(hwmgr->device,
118 AMD_IP_BLOCK_TYPE_UVD,
119 AMD_PG_STATE_GATE);
120 tonga_update_uvd_dpm(hwmgr, true);
121 tonga_phm_powerdown_uvd(hwmgr);
122 } else {
123 tonga_phm_powerup_uvd(hwmgr);
124 cgs_set_powergating_state(hwmgr->device,
125 AMD_IP_BLOCK_TYPE_UVD,
126 AMD_PG_STATE_UNGATE);
127 cgs_set_clockgating_state(hwmgr->device,
128 AMD_IP_BLOCK_TYPE_UVD,
129 AMD_PG_STATE_GATE);
130
131 tonga_update_uvd_dpm(hwmgr, false);
132 }
133
134 return 0;
135}
136
137int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
138{
139 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
140 struct phm_set_power_state_input states;
141 const struct pp_power_state *pcurrent;
142 struct pp_power_state *requested;
143
144 pcurrent = hwmgr->current_ps;
145 requested = hwmgr->request_ps;
146
147 states.pcurrent_state = &(pcurrent->hardware);
148 states.pnew_state = &(requested->hardware);
149
150 if (phm_cf_want_vce_power_gating(hwmgr)) {
151 if (data->vce_power_gated != bgate) {
152 if (bgate) {
153 cgs_set_clockgating_state(
154 hwmgr->device,
155 AMD_IP_BLOCK_TYPE_VCE,
156 AMD_CG_STATE_UNGATE);
157 cgs_set_powergating_state(
158 hwmgr->device,
159 AMD_IP_BLOCK_TYPE_VCE,
160 AMD_PG_STATE_GATE);
161 tonga_enable_disable_vce_dpm(hwmgr, false);
162 data->vce_power_gated = true;
163 } else {
164 tonga_phm_powerup_vce(hwmgr);
165 data->vce_power_gated = false;
166 cgs_set_powergating_state(
167 hwmgr->device,
168 AMD_IP_BLOCK_TYPE_VCE,
169 AMD_PG_STATE_UNGATE);
170 cgs_set_clockgating_state(
171 hwmgr->device,
172 AMD_IP_BLOCK_TYPE_VCE,
173 AMD_PG_STATE_GATE);
174
175 tonga_update_vce_dpm(hwmgr, &states);
176 tonga_enable_disable_vce_dpm(hwmgr, true);
177 return 0;
178 }
179 }
180 } else {
181 tonga_update_vce_dpm(hwmgr, &states);
182 tonga_enable_disable_vce_dpm(hwmgr, true);
183 return 0;
184 }
185
186 if (!data->vce_power_gated)
187 tonga_update_vce_dpm(hwmgr, &states);
188
189 return 0;
190}
191
192int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
193 const uint32_t *msg_id)
194{
195 PPSMC_Msg msg;
196 uint32_t value;
197
198 switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
199 case PP_GROUP_GFX:
200 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
201 case PP_BLOCK_GFX_CG:
202 if (PP_STATE_SUPPORT_CG & *msg_id) {
203 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
204 ? PPSMC_MSG_EnableClockGatingFeature
205 : PPSMC_MSG_DisableClockGatingFeature;
206 value = CG_GFX_CGCG_MASK;
207
208 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
209 return -1;
210 }
211 if (PP_STATE_SUPPORT_LS & *msg_id) {
212 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
213 ? PPSMC_MSG_EnableClockGatingFeature
214 : PPSMC_MSG_DisableClockGatingFeature;
215 value = CG_GFX_CGLS_MASK;
216
217 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
218 return -1;
219 }
220 break;
221
222 case PP_BLOCK_GFX_MG:
223 /* For GFX MGCG, there are three different ones;
224 * CPF, RLC, and all others. CPF MGCG will not be used for Tonga.
225 * For GFX MGLS, Tonga will not support it.
226 * */
227 if (PP_STATE_SUPPORT_CG & *msg_id) {
228 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
229 ? PPSMC_MSG_EnableClockGatingFeature
230 : PPSMC_MSG_DisableClockGatingFeature;
231 value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
232
233 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
234 return -1;
235 }
236 break;
237
238 default:
239 return -1;
240 }
241 break;
242
243 case PP_GROUP_SYS:
244 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
245 case PP_BLOCK_SYS_BIF:
246 if (PP_STATE_SUPPORT_LS & *msg_id) {
247 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
248 ? PPSMC_MSG_EnableClockGatingFeature
249 : PPSMC_MSG_DisableClockGatingFeature;
250 value = CG_SYS_BIF_MGLS_MASK;
251
252 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
253 return -1;
254 }
255 break;
256
257 case PP_BLOCK_SYS_MC:
258 if (PP_STATE_SUPPORT_CG & *msg_id) {
259 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
260 ? PPSMC_MSG_EnableClockGatingFeature
261 : PPSMC_MSG_DisableClockGatingFeature;
262 value = CG_SYS_MC_MGCG_MASK;
263
264 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
265 return -1;
266 }
267
268 if (PP_STATE_SUPPORT_LS & *msg_id) {
269 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
270 ? PPSMC_MSG_EnableClockGatingFeature
271 : PPSMC_MSG_DisableClockGatingFeature;
272 value = CG_SYS_MC_MGLS_MASK;
273
274 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
275 return -1;
276
277 }
278 break;
279
280 case PP_BLOCK_SYS_HDP:
281 if (PP_STATE_SUPPORT_CG & *msg_id) {
282 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
283 ? PPSMC_MSG_EnableClockGatingFeature
284 : PPSMC_MSG_DisableClockGatingFeature;
285 value = CG_SYS_HDP_MGCG_MASK;
286
287 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
288 return -1;
289 }
290
291 if (PP_STATE_SUPPORT_LS & *msg_id) {
292 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
293 ? PPSMC_MSG_EnableClockGatingFeature
294 : PPSMC_MSG_DisableClockGatingFeature;
295
296 value = CG_SYS_HDP_MGLS_MASK;
297
298 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
299 return -1;
300 }
301 break;
302
303 case PP_BLOCK_SYS_SDMA:
304 if (PP_STATE_SUPPORT_CG & *msg_id) {
305 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
306 ? PPSMC_MSG_EnableClockGatingFeature
307 : PPSMC_MSG_DisableClockGatingFeature;
308 value = CG_SYS_SDMA_MGCG_MASK;
309
310 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
311 return -1;
312 }
313
314 if (PP_STATE_SUPPORT_LS & *msg_id) {
315 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
316 ? PPSMC_MSG_EnableClockGatingFeature
317 : PPSMC_MSG_DisableClockGatingFeature;
318
319 value = CG_SYS_SDMA_MGLS_MASK;
320
321 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
322 return -1;
323 }
324 break;
325
326 case PP_BLOCK_SYS_ROM:
327 if (PP_STATE_SUPPORT_CG & *msg_id) {
328 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
329 ? PPSMC_MSG_EnableClockGatingFeature
330 : PPSMC_MSG_DisableClockGatingFeature;
331 value = CG_SYS_ROM_MASK;
332
333 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
334 return -1;
335 }
336 break;
337
338 default:
339 return -1;
340
341 }
342 break;
343
344 default:
345 return -1;
346
347 }
348
349 return 0;
350}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
new file mode 100644
index 000000000000..8bc38cb17b7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _TONGA_CLOCK_POWER_GATING_H_
25#define _TONGA_CLOCK_POWER_GATING_H_
26
27#include "tonga_hwmgr.h"
28#include "pp_asicblocks.h"
29
30extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
31extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
32extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
33extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
34extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
35extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
36#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
new file mode 100644
index 000000000000..080d69d77f04
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef TONGA_DYN_DEFAULTS_H
24#define TONGA_DYN_DEFAULTS_H
25
26
27/** \file
28 * Volcanic Islands Dynamic default parameters.
29 */
30
31enum TONGAdpm_TrendDetection {
32 TONGAdpm_TrendDetection_AUTO,
33 TONGAdpm_TrendDetection_UP,
34 TONGAdpm_TrendDetection_DOWN
35};
36typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
37
38/* Bit vector representing same fields as hardware register. */
39#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */
40/* HDP_busy */
41/* IH_busy */
42/* DRM_busy */
43/* DRMDMA_busy */
44/* UVD_busy */
45/* VCE_busy */
46/* ACP_busy */
47/* SAMU_busy */
48/* AVP_busy */
49/* SDMA enabled */
50#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
51/* SH_Gfx_busy */
52/* RB_Gfx_busy */
53/* VCE_busy */
54
55#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
56/* FE_Gfx_busy */
57/* RB_Gfx_busy */
58/* ACP_busy */
59
60#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
61/* FE_Gfx_busy */
62/* SH_Gfx_busy */
63/* UVD_busy */
64
65#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */
66/* VCE_busy */
67/* ACP_busy */
68/* SAMU_busy */
69
70#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */
71#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */
72#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */
73
74
75/* thermal protection counter (units).*/
76#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
77
78/* static screen threshold unit */
79#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0
80
81/* static screen threshold */
82#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8
83
84/* gfx idle clock stop threshold */
85#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
86
87/* Fixed reference divider to use when building baby stepping tables. */
88#define PPTONGA_REFERENCEDIVIDER_DFLT 4
89
90/*
91 * ULV voltage change delay time
92 * Used to be delay_vreg in N.I. split for S.I.
93 * Using N.I. delay_vreg value as default
94 * ReferenceClock = 2700
95 * VoltageResponseTime = 1000
96 * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
97 */
98
99#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687
100
101#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035
102#define PPTONGA_CGULVCONTROL_DFLT 0x00007450
103#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */
104#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */
105
106#endif
107
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
new file mode 100644
index 000000000000..44a925006479
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -0,0 +1,6075 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27#include "pp_acpi.h"
28#include "hwmgr.h"
29#include <atombios.h>
30#include "tonga_hwmgr.h"
31#include "pptable.h"
32#include "processpptables.h"
33#include "tonga_processpptables.h"
34#include "tonga_pptable.h"
35#include "pp_debug.h"
36#include "tonga_ppsmc.h"
37#include "cgs_common.h"
38#include "pppcielanes.h"
39#include "tonga_dyn_defaults.h"
40#include "smumgr.h"
41#include "tonga_smumgr.h"
42#include "tonga_clockpowergating.h"
43#include "tonga_thermal.h"
44
45#include "smu/smu_7_1_2_d.h"
46#include "smu/smu_7_1_2_sh_mask.h"
47
48#include "gmc/gmc_8_1_d.h"
49#include "gmc/gmc_8_1_sh_mask.h"
50
51#include "bif/bif_5_0_d.h"
52#include "bif/bif_5_0_sh_mask.h"
53
54#include "cgs_linux.h"
55#include "eventmgr.h"
56#include "amd_pcie_helpers.h"
57
58#define MC_CG_ARB_FREQ_F0 0x0a
59#define MC_CG_ARB_FREQ_F1 0x0b
60#define MC_CG_ARB_FREQ_F2 0x0c
61#define MC_CG_ARB_FREQ_F3 0x0d
62
63#define MC_CG_SEQ_DRAMCONF_S0 0x05
64#define MC_CG_SEQ_DRAMCONF_S1 0x06
65#define MC_CG_SEQ_YCLK_SUSPEND 0x04
66#define MC_CG_SEQ_YCLK_RESUME 0x0a
67
68#define PCIE_BUS_CLK 10000
69#define TCLK (PCIE_BUS_CLK / 10)
70
71#define SMC_RAM_END 0x40000
72#define SMC_CG_IND_START 0xc0030000
73#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/
74
75#define VOLTAGE_SCALE 4
76#define VOLTAGE_VID_OFFSET_SCALE1 625
77#define VOLTAGE_VID_OFFSET_SCALE2 100
78
79#define VDDC_VDDCI_DELTA 200
80#define VDDC_VDDGFX_DELTA 300
81
82#define MC_SEQ_MISC0_GDDR5_SHIFT 28
83#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
84#define MC_SEQ_MISC0_GDDR5_VALUE 5
85
/* Registry-override value type (carried over from internal tooling; unused here). */
typedef uint32_t PECI_RegistryValue;

/*
 * Clock-stretcher lookup table.
 * Row [0]: stretch amount is a multiple of 2.5%; row [1]: it is not.
 * Columns: { Fmin (MHz), Fmax (MHz), LDO_REFSEL, USE_FOR_LOW_FREQ }.
 * NOTE(review): these three tables are neither static nor const — confirm no
 * other translation unit references them before tightening the linkage.
 */
uint16_t PP_ClockStretcherLookupTable[2][4] = {
	{600, 1050, 3, 0},
	{600, 1050, 6, 1} };

/*
 * Clock-stretcher DDT table, indexed by [FF/SS type][voltage range].
 * Columns: { Floor Freq (MHz), Boundary Freq (MHz), VID min, VID max }.
 */
uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
	{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
	{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };

/*
 * Conversion from stretch percentage to PWR_CKS_CNTL.stretch_amount encoding,
 * indexed by [USE_FOR_LOW_FREQ][0%, 5%, 10%, 7.14%, 14.28%, 20%]
 * (encodings come from the PWR_CKS_CNTL register spec).
 */
uint8_t PP_ClockStretchAmountConversion[2][6] = {
	{0, 1, 3, 2, 4, 5},
	{0, 2, 4, 5, 6, 5} };
102
/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,               /* Internal analog trip point */
	DPM_EVENT_SRC_EXTERNAL = 1,             /* External (GPIO 17) signal */
	DPM_EVENT_SRC_DIGITAL = 2,              /* Internal digital trip point (DIG_THERM_DPM) */
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,   /* Internal analog or external */
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4   /* Internal digital or external */
};
typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;

/* Magic value stamped into pp_hw_power_state::magic to identify Tonga states. */
const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
114
/*
 * Downcast a generic pp_hw_power_state to the Tonga-specific power state.
 * Returns NULL for a NULL input or for a state whose magic does not match
 * PhwTonga_Magic (i.e. a state not created by this driver).
 */
struct tonga_power_state *cast_phw_tonga_power_state(
			  struct pp_hw_power_state *hw_ps)
{
	if (hw_ps == NULL)
		return NULL;

	/* PP_ASSERT_WITH_CODE logs the message and runs the code on failure. */
	PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (struct tonga_power_state *)hw_ps;
}
127
/*
 * Const variant of cast_phw_tonga_power_state(): validate the magic and
 * downcast without granting write access. Returns NULL on NULL input or
 * on a magic mismatch.
 */
const struct tonga_power_state *cast_const_phw_tonga_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	if (hw_ps == NULL)
		return NULL;

	PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (const struct tonga_power_state *)hw_ps;
}
140
141int tonga_add_voltage(struct pp_hwmgr *hwmgr,
142 phm_ppt_v1_voltage_lookup_table *look_up_table,
143 phm_ppt_v1_voltage_lookup_record *record)
144{
145 uint32_t i;
146 PP_ASSERT_WITH_CODE((NULL != look_up_table),
147 "Lookup Table empty.", return -1;);
148 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
149 "Lookup Table empty.", return -1;);
150 PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count),
151 "Lookup Table is full.", return -1;);
152
153 /* This is to avoid entering duplicate calculated records. */
154 for (i = 0; i < look_up_table->count; i++) {
155 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
156 if (look_up_table->entries[i].us_calculated == 1)
157 return 0;
158 else
159 break;
160 }
161 }
162
163 look_up_table->entries[i].us_calculated = 1;
164 look_up_table->entries[i].us_vdd = record->us_vdd;
165 look_up_table->entries[i].us_cac_low = record->us_cac_low;
166 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
167 look_up_table->entries[i].us_cac_high = record->us_cac_high;
168 /* Only increment the count when we're appending, not replacing duplicate entry. */
169 if (i == look_up_table->count)
170 look_up_table->count++;
171
172 return 0;
173}
174
175int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
176{
177 PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
178
179 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
180}
181
182uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
183 uint32_t voltage)
184{
185 uint8_t count = (uint8_t) (voltage_table->count);
186 uint8_t i = 0;
187
188 PP_ASSERT_WITH_CODE((NULL != voltage_table),
189 "Voltage Table empty.", return 0;);
190 PP_ASSERT_WITH_CODE((0 != count),
191 "Voltage Table empty.", return 0;);
192
193 for (i = 0; i < count; i++) {
194 /* find first voltage bigger than requested */
195 if (voltage_table->entries[i].value >= voltage)
196 return i;
197 }
198
199 /* voltage is bigger than max voltage in the table */
200 return i - 1;
201}
202
203/**
204 * @brief PhwTonga_GetVoltageOrder
205 * Returns index of requested voltage record in lookup(table)
206 * @param hwmgr - pointer to hardware manager
207 * @param lookupTable - lookup list to search in
208 * @param voltage - voltage to look for
209 * @return 0 on success
210 */
211uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
212 uint16_t voltage)
213{
214 uint8_t count = (uint8_t) (look_up_table->count);
215 uint8_t i;
216
217 PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
218 PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
219
220 for (i = 0; i < count; i++) {
221 /* find first voltage equal or bigger than requested */
222 if (look_up_table->entries[i].us_vdd >= voltage)
223 return i;
224 }
225
226 /* voltage is bigger than max voltage in the table */
227 return i-1;
228}
229
230bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
231{
232 /*
233 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
234 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
235 * whereas voltage control is a fundemental change that will not be disabled
236 */
237
238 return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
239 FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
240}
241
242/**
243 * Re-generate the DPM level mask value
244 * @param hwmgr the address of the hardware manager
245 */
246static uint32_t tonga_get_dpm_level_enable_mask_value(
247 struct tonga_single_dpm_table * dpm_table)
248{
249 uint32_t i;
250 uint32_t mask_value = 0;
251
252 for (i = dpm_table->count; i > 0; i--) {
253 mask_value = mask_value << 1;
254
255 if (dpm_table->dpm_levels[i-1].enabled)
256 mask_value |= 0x1;
257 else
258 mask_value &= 0xFFFFFFFE;
259 }
260 return mask_value;
261}
262
263/**
264 * Retrieve DPM default values from registry (if available)
265 *
266 * @param hwmgr the address of the powerplay hardware manager.
267 */
268void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
269{
270 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
271 phw_tonga_ulv_parm *ulv = &(data->ulv);
272 uint32_t tmp;
273
274 ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT;
275 data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0;
276 data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1;
277 data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2;
278 data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3;
279 data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4;
280 data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5;
281 data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6;
282 data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7;
283
284 data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT;
285 data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT;
286
287 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
288 PHM_PlatformCaps_ABM);
289 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
290 PHM_PlatformCaps_NonABMSupportInPPLib);
291
292 tmp = 0;
293 if (tmp == 0)
294 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
295 PHM_PlatformCaps_DynamicACTiming);
296
297 tmp = 0;
298 if (0 != tmp)
299 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
300 PHM_PlatformCaps_DisableMemoryTransition);
301
302 data->mclk_strobe_mode_threshold = 40000;
303 data->mclk_stutter_mode_threshold = 30000;
304 data->mclk_edc_enable_threshold = 40000;
305 data->mclk_edc_wr_enable_threshold = 40000;
306
307 tmp = 0;
308 if (tmp != 0)
309 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
310 PHM_PlatformCaps_DisableMCLS);
311
312 data->pcie_gen_performance.max = PP_PCIEGen1;
313 data->pcie_gen_performance.min = PP_PCIEGen3;
314 data->pcie_gen_power_saving.max = PP_PCIEGen1;
315 data->pcie_gen_power_saving.min = PP_PCIEGen3;
316
317 data->pcie_lane_performance.max = 0;
318 data->pcie_lane_performance.min = 16;
319 data->pcie_lane_power_saving.max = 0;
320 data->pcie_lane_power_saving.min = 16;
321
322 tmp = 0;
323
324 if (tmp)
325 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
326 PHM_PlatformCaps_SclkThrottleLowNotification);
327
328 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
329 PHM_PlatformCaps_DynamicUVDState);
330
331}
332
333int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
334{
335 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
336
337 int result = 0;
338 uint32_t low_sclk_interrupt_threshold = 0;
339
340 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
341 PHM_PlatformCaps_SclkThrottleLowNotification)
342 && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
343 data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
344 low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
345
346 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
347
348 result = tonga_copy_bytes_to_smc(
349 hwmgr->smumgr,
350 data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable,
351 LowSclkInterruptThreshold),
352 (uint8_t *)&low_sclk_interrupt_threshold,
353 sizeof(uint32_t),
354 data->sram_end
355 );
356 }
357
358 return result;
359}
360
/**
 * Find the SCLK value associated with the given virtual (leakage)
 * voltage ID by scanning the vdd_dep_on_sclk dependency table.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param lookup_table voltage lookup table the dependency entries index into.
 * @param virtual_voltage_id leakage voltage ID (0xff01..0xff08) to look for.
 * @param sclk output: the matching engine clock.
 * @return 0 on success, -1 if the table is empty or no entry matches.
 */
static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, uint32_t *sclk)
{
	uint8_t entryId;
	uint8_t voltageId;
	struct phm_ppt_v1_information *pptable_info =
		(struct phm_ppt_v1_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
	for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) {
		/* vddInd is an index into lookup_table, not a voltage */
		voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd;
		if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
			break;
	}

	/* loop fell through: no dependency entry references this voltage ID */
	PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count,
			"Can't find requested voltage id in vdd_dep_on_sclk table!",
			return -1;
			);

	*sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk;

	return 0;
}
396
/**
 * Collect leakage (EVV) voltages for every virtual voltage ID and cache
 * them in the backend's leakage tables.
 *
 * In split-rail mode (vdd_gfx on SVID2) the values are VDDGFX leakages and
 * land in data->vddcgfx_leakage; in merged mode they are VDDC leakages and
 * land in data->vddc_leakage.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0; a VDDGFX reading >= 2V aborts with -1 (would damage the ASIC).
 */
int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	uint16_t virtual_voltage_id;
	uint16_t vddc = 0;
	uint16_t vddgfx = 0;
	uint16_t i, j;
	uint32_t sclk = 0;

	/* retrieve voltage for leakage ID (0xff01 + i) */
	for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) {
		virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		/* in split mode we should have only vddgfx EVV leakages */
		if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
			if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
				pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) {
				/* bump sclk by 5 MHz when clock stretching is off at this level */
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher)) {
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
							sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}
				/* a failed ATOM read just skips this leakage ID */
				PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk
					    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
					     virtual_voltage_id, &vddgfx),
					"Error retrieving EVV voltage value!", continue);

				/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
				PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);

				/* the voltage should not be zero nor equal to leakage ID */
				if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
					data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
					data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
					data->vddcgfx_leakage.count++;
				}
			}
		} else {
			/* in merged mode we have only vddc EVV leakages */
			if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
				pptable_info->vddc_lookup_table,
				virtual_voltage_id, &sclk)) {
				PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk
					    (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
					     virtual_voltage_id, &vddc),
					"Error retrieving EVV voltage value!", continue);

				/* need to make sure vddc is less than 2v or else, it could burn the ASIC.
				 * NOTE(review): unlike the VDDGFX path above this only logs and
				 * keeps going — confirm whether it should also bail out. */
				if (vddc > 2000)
					printk(KERN_ERR "[ powerplay ] Invalid VDDC value! \n");

				/* the voltage should not be zero nor equal to leakage ID */
				if (vddc != 0 && vddc != virtual_voltage_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}
473
/*
 * Enable SCLK and MCLK DPM in the SMC (unless disabled by the respective
 * key), and program the LCAC read/write windows needed by MCLK DPM.
 * Returns 0 on success, -1 if either SMC message fails.
 */
int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (0 == data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						   PPSMC_MSG_DPM_Enable)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -1);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					     PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -1);

		/* turn on memory-channel CAC accumulation */
		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		/* configure the LCAC blocks for reads first... */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x100005);/*Read */

		/* let the read configuration settle before switching to write */
		udelay(10);

		/* ...then for writes */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x500005);/* write */

	}

	return 0;
}
517
/*
 * Start dynamic power management: enable global power management and SCLK
 * deep sleep, prepare PCIe DPM, enable voltage control, then SCLK/MCLK DPM
 * and (optionally) PCIe DPM.  Returns 0 on success, -1 on any SMC failure.
 */
int tonga_start_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* enable general power management */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
	/* enable sclk deep sleep */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM: give the SMC a voltage-change timeout */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
			offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000);

	/* release the link-controller reset */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -1);

	if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) {
		PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -1
				);
	}

	/* best-effort: the return value of this message is deliberately ignored */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt);
	}

	return 0;
}
561
/*
 * Disable SCLK and MCLK DPM in the SMC (unless disabled by the respective
 * key).  Each disable is preceded by a DPM-running check to avoid sending
 * the message while DPM is already down.  Returns 0 on success, -1 on
 * failure.
 */
int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (0 == data->sclk_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
		PP_ASSERT_WITH_CODE(
				(0 == tonga_is_dpm_running(hwmgr)),
				"Trying to Disable SCLK DPM when DPM is disabled",
				return -1
				);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						  PPSMC_MSG_DPM_Disable)),
				"Failed to disable SCLK DPM during DPM stop Function!",
				return -1);
	}

	/* disable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
		PP_ASSERT_WITH_CODE(
				(0 == tonga_is_dpm_running(hwmgr)),
				"Trying to Disable MCLK DPM when DPM is disabled",
				return -1
				);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					    PPSMC_MSG_MCLKDPM_Disable)),
				"Failed to Disable MCLK DPM during DPM stop Function!",
				return -1);
	}

	return 0;
}
600
/*
 * Stop dynamic power management: turn off global power management and SCLK
 * deep sleep, disable PCIe DPM (if enabled), then SCLK/MCLK DPM, and
 * finally voltage control.  Returns 0 on success, -1 on failure.
 */
int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep*/
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
		PP_ASSERT_WITH_CODE(
				(0 == tonga_is_dpm_running(hwmgr)),
				"Trying to Disable PCIE DPM when DPM is disabled",
				return -1
				);
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Disable)),
				"Failed to disable pcie DPM during DPM stop Function!",
				return -1);
	}

	if (0 != tonga_disable_sclk_mclk_dpm(hwmgr))
		PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1);

	/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
	PP_ASSERT_WITH_CODE(
			(0 == tonga_is_dpm_running(hwmgr)),
			"Trying to Disable Voltage CNTL when DPM is disabled",
			return -1
			);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Disable)),
			"Failed to disable voltage DPM during DPM stop Function!",
			return -1);

	return 0;
}
642
/*
 * Enable SCLK control by clearing SCLK_PWRMGT_OFF in SCLK_PWRMGT_CNTL.
 * Always returns 0.
 */
int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);

	return 0;
}
649
650/**
651 * Send a message to the SMC and return a parameter
652 *
653 * @param hwmgr: the address of the powerplay hardware manager.
654 * @param msg: the message to send.
655 * @param parameter: pointer to the received parameter
656 * @return The response that came from the SMC.
657 */
658PPSMC_Result tonga_send_msg_to_smc_return_parameter(
659 struct pp_hwmgr *hwmgr,
660 PPSMC_Msg msg,
661 uint32_t *parameter)
662{
663 int result;
664
665 result = smum_send_msg_to_smc(hwmgr->smumgr, msg);
666
667 if ((0 == result) && parameter) {
668 *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
669 }
670
671 return result;
672}
673
674/**
675 * force DPM power State
676 *
677 * @param hwmgr: the address of the powerplay hardware manager.
678 * @param n : DPM level
679 * @return The response that came from the SMC.
680 */
681int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
682{
683 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
684 uint32_t level_mask = 1 << n;
685
686 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
687 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
688 "Trying to force SCLK when DPM is disabled", return -1;);
689 if (0 == data->sclk_dpm_key_disabled)
690 return (0 == smum_send_msg_to_smc_with_parameter(
691 hwmgr->smumgr,
692 (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask),
693 level_mask) ? 0 : 1);
694
695 return 0;
696}
697
/**
 * Force the MCLK DPM enabled-level mask to a single level.
 *
 * @param hwmgr: the address of the powerplay hardware manager.
 * @param n : DPM level to force.
 * @return 0 on success (or when MCLK DPM is key-disabled), 1 if the SMC
 *         rejected the message, -1 if DPM is not running.
 */
int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	uint32_t level_mask = 1 << n;

	/* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
	PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
		"Trying to Force MCLK when DPM is disabled", return -1;);
	if (0 == data->mclk_dpm_key_disabled)
		return (0 == smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				(PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask),
				level_mask) ? 0 : 1);

	return 0;
}
721
/**
 * Force the PCIe DPM level.
 *
 * @param hwmgr: the address of the powerplay hardware manager.
 * @param n : PCIe DPM level to force (passed directly, not as a mask).
 * @return 0 on success (or when PCIe DPM is key-disabled), 1 if the SMC
 *         rejected the message, -1 if DPM is not running.
 */
int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
	PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
		"Trying to Force PCIE level when DPM is disabled", return -1;);
	if (0 == data->pcie_dpm_key_disabled)
		return (0 == smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				(PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel),
				n) ? 0 : 1);

	return 0;
}
744
/**
 * Set the initial (boot) state by asking the SMC to switch to it directly.
 * The SMC only stores one state that software can ask to switch to, so we
 * switch to the one just uploaded.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, 1 on failure.
 */
int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
{
	if (tonga_disable_sclk_mclk_dpm(hwmgr) == 0)
		return 0;

	return 1;
}
759
760/**
761 * Get the location of various tables inside the FW image.
762 *
763 * @param hwmgr the address of the powerplay hardware manager.
764 * @return always 0
765 */
766int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
767{
768 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
769 struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
770
771 uint32_t tmp;
772 int result;
773 bool error = 0;
774
775 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
776 SMU72_FIRMWARE_HEADER_LOCATION +
777 offsetof(SMU72_Firmware_Header, DpmTable),
778 &tmp, data->sram_end);
779
780 if (0 == result) {
781 data->dpm_table_start = tmp;
782 }
783
784 error |= (0 != result);
785
786 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
787 SMU72_FIRMWARE_HEADER_LOCATION +
788 offsetof(SMU72_Firmware_Header, SoftRegisters),
789 &tmp, data->sram_end);
790
791 if (0 == result) {
792 data->soft_regs_start = tmp;
793 tonga_smu->ulSoftRegsStart = tmp;
794 }
795
796 error |= (0 != result);
797
798
799 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
800 SMU72_FIRMWARE_HEADER_LOCATION +
801 offsetof(SMU72_Firmware_Header, mcRegisterTable),
802 &tmp, data->sram_end);
803
804 if (0 == result) {
805 data->mc_reg_table_start = tmp;
806 }
807
808 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
809 SMU72_FIRMWARE_HEADER_LOCATION +
810 offsetof(SMU72_Firmware_Header, FanTable),
811 &tmp, data->sram_end);
812
813 if (0 == result) {
814 data->fan_table_start = tmp;
815 }
816
817 error |= (0 != result);
818
819 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
820 SMU72_FIRMWARE_HEADER_LOCATION +
821 offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
822 &tmp, data->sram_end);
823
824 if (0 == result) {
825 data->arb_table_start = tmp;
826 }
827
828 error |= (0 != result);
829
830
831 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
832 SMU72_FIRMWARE_HEADER_LOCATION +
833 offsetof(SMU72_Firmware_Header, Version),
834 &tmp, data->sram_end);
835
836 if (0 == result) {
837 hwmgr->microcode_version_info.SMC = tmp;
838 }
839
840 error |= (0 != result);
841
842 return error ? 1 : 0;
843}
844
/**
 * Snapshot the clock-related registers (SPLL function/spread-spectrum and
 * MPLL/MCLK control registers) into data->clock_registers so later code
 * can build clock tables from a consistent capture.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* SPLL registers live in the SMC indirect space... */
	data->clock_registers.vCG_SPLL_FUNC_CNTL =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	/* ...while the MPLL/MCLK registers are directly addressable */
	data->clock_registers.vDLL_CNTL =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1 =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2 =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);

	return 0;
}
888
889/**
890 * Find out if memory is GDDR5.
891 *
892 * @param hwmgr the address of the powerplay hardware manager.
893 * @return always 0
894 */
895int tonga_get_memory_type(struct pp_hwmgr *hwmgr)
896{
897 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
898 uint32_t temp;
899
900 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
901
902 data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
903 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
904 MC_SEQ_MISC0_GDDR5_SHIFT));
905
906 return 0;
907}
908
/**
 * Enable static (ACPI) power management by the SMC via the STATIC_PM_EN
 * bit of GENERAL_PWRMGT.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}
921
/**
 * Initialize the power-gating bookkeeping: all engines (UVD, VCE, SAMU,
 * ACP) start un-gated, and the ACP "first gating" latch is armed.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	data->uvd_power_gated = 0;
	data->vce_power_gated = 0;
	data->samu_power_gated = 0;
	data->acp_power_gated = 0;
	/* marks that ACP gating has not been applied yet */
	data->pg_acp_init = 1;

	return 0;
}
940
/**
 * Check whether DPM is enabled, delegating to the voltage-controller
 * status (see tonga_is_dpm_running): SCLK/MCLK DPM may be intentionally
 * disabled in test scenarios, whereas voltage control is fundamental.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 when DPM is not reported running, 1 otherwise.
 */
int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
{
	return tonga_is_dpm_running(hwmgr) ? 1 : 0;
}
956
957/**
958 * Checks if DPM is stopped
959 *
960 * @param hwmgr the address of the powerplay hardware manager.
961 * @return always 0
962 */
963int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
964{
965 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
966
967 if (0 != tonga_is_dpm_running(hwmgr)) {
968 /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
969 if (!data->dpm_table_start) {
970 return 1;
971 }
972 }
973
974 return 0;
975}
976
977/**
978 * Remove repeated voltage values and create table with unique values.
979 *
980 * @param hwmgr the address of the powerplay hardware manager.
981 * @param voltage_table the pointer to changing voltage table
982 * @return 1 in success
983 */
984
985static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
986 pp_atomctrl_voltage_table *voltage_table)
987{
988 uint32_t table_size, i, j;
989 uint16_t vvalue;
990 bool bVoltageFound = 0;
991 pp_atomctrl_voltage_table *table;
992
993 PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
994 table_size = sizeof(pp_atomctrl_voltage_table);
995 table = kzalloc(table_size, GFP_KERNEL);
996
997 if (NULL == table)
998 return -ENOMEM;
999
1000 memset(table, 0x00, table_size);
1001 table->mask_low = voltage_table->mask_low;
1002 table->phase_delay = voltage_table->phase_delay;
1003
1004 for (i = 0; i < voltage_table->count; i++) {
1005 vvalue = voltage_table->entries[i].value;
1006 bVoltageFound = 0;
1007
1008 for (j = 0; j < table->count; j++) {
1009 if (vvalue == table->entries[j].value) {
1010 bVoltageFound = 1;
1011 break;
1012 }
1013 }
1014
1015 if (!bVoltageFound) {
1016 table->entries[table->count].value = vvalue;
1017 table->entries[table->count].smio_low =
1018 voltage_table->entries[i].smio_low;
1019 table->count++;
1020 }
1021 }
1022
1023 memcpy(table, voltage_table, sizeof(pp_atomctrl_voltage_table));
1024
1025 kfree(table);
1026
1027 return 0;
1028}
1029
1030static int tonga_get_svi2_vdd_ci_voltage_table(
1031 struct pp_hwmgr *hwmgr,
1032 phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
1033{
1034 uint32_t i;
1035 int result;
1036 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1037 pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);
1038
1039 PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
1040 "Voltage Dependency Table empty.", return -1;);
1041
1042 vddci_voltage_table->mask_low = 0;
1043 vddci_voltage_table->phase_delay = 0;
1044 vddci_voltage_table->count = voltage_dependency_table->count;
1045
1046 for (i = 0; i < voltage_dependency_table->count; i++) {
1047 vddci_voltage_table->entries[i].value =
1048 voltage_dependency_table->entries[i].vddci;
1049 vddci_voltage_table->entries[i].smio_low = 0;
1050 }
1051
1052 result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
1053 PP_ASSERT_WITH_CODE((0 == result),
1054 "Failed to trim VDDCI table.", return result;);
1055
1056 return 0;
1057}
1058
1059
1060
1061static int tonga_get_svi2_vdd_voltage_table(
1062 struct pp_hwmgr *hwmgr,
1063 phm_ppt_v1_voltage_lookup_table *look_up_table,
1064 pp_atomctrl_voltage_table *voltage_table)
1065{
1066 uint8_t i = 0;
1067
1068 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1069 "Voltage Lookup Table empty.", return -1;);
1070
1071 voltage_table->mask_low = 0;
1072 voltage_table->phase_delay = 0;
1073
1074 voltage_table->count = look_up_table->count;
1075
1076 for (i = 0; i < voltage_table->count; i++) {
1077 voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
1078 voltage_table->entries[i].smio_low = 0;
1079 }
1080
1081 return 0;
1082}
1083
1084/*
1085 * -------------------------------------------------------- Voltage Tables --------------------------------------------------------------------------
1086 * If the voltage table would be bigger than what will fit into the state table on the SMC keep only the higher entries.
1087 */
1088
1089static void tonga_trim_voltage_table_to_fit_state_table(
1090 struct pp_hwmgr *hwmgr,
1091 uint32_t max_voltage_steps,
1092 pp_atomctrl_voltage_table *voltage_table)
1093{
1094 unsigned int i, diff;
1095
1096 if (voltage_table->count <= max_voltage_steps) {
1097 return;
1098 }
1099
1100 diff = voltage_table->count - max_voltage_steps;
1101
1102 for (i = 0; i < max_voltage_steps; i++) {
1103 voltage_table->entries[i] = voltage_table->entries[i + diff];
1104 }
1105
1106 voltage_table->count = max_voltage_steps;
1107
1108 return;
1109}
1110
/**
 * Create Voltage Tables.
 *
 * Builds the MVDD, VDDCI, VDDGFX and VDDC voltage tables, each from either
 * GPIO (VBIOS lookup) data or SVI2 dependency/lookup tables depending on
 * that rail's configured control method, then trims every table that would
 * not fit the SMC state table limits.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, non-zero when a VBIOS/table query fails
 */
int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result;

	/* MVDD has only GPIO voltage control */
	if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve MVDD table.", return result;);
	}

	if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
		/* GPIO voltage */
		result = atomctrl_get_voltage_table_v3(hwmgr,
			VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve VDDCI table.", return result;);
	} else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
		/* SVI2 voltage: derived from the MCLK dependency table */
		result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr,
					pptable_info->vdd_dep_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
	}

	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* VDDGFX has only SVI2 voltage control */
		result = tonga_get_svi2_vdd_voltage_table(hwmgr,
					pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
	}

	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		/* VDDC has only SVI2 voltage control */
		result = tonga_get_svi2_vdd_voltage_table(hwmgr,
					pptable_info->vddc_lookup_table, &(data->vddc_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDC table from lookup table.", return result;);
	}

	/*
	 * For each rail, an oversized table is not an error: the assert logs
	 * the message and the trim call below is its recovery action, after
	 * which execution continues.
	 */
	PP_ASSERT_WITH_CODE(
		(data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)),
		"Too many voltage values for VDDC. Trimming to fit state table.",
		tonga_trim_voltage_table_to_fit_state_table(hwmgr,
		SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
		);

	PP_ASSERT_WITH_CODE(
		(data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)),
		"Too many voltage values for VDDGFX. Trimming to fit state table.",
		tonga_trim_voltage_table_to_fit_state_table(hwmgr,
		SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table));
		);

	PP_ASSERT_WITH_CODE(
		(data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)),
		"Too many voltage values for VDDCI. Trimming to fit state table.",
		tonga_trim_voltage_table_to_fit_state_table(hwmgr,
		SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
		);

	PP_ASSERT_WITH_CODE(
		(data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)),
		"Too many voltage values for MVDD. Trimming to fit state table.",
		tonga_trim_voltage_table_to_fit_state_table(hwmgr,
		SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
		);

	return 0;
}
1191
1192/**
1193 * Vddc table preparation for SMC.
1194 *
1195 * @param hwmgr the address of the hardware manager
1196 * @param table the SMC DPM table structure to be populated
1197 * @return always 0
1198 */
1199static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
1200 SMU72_Discrete_DpmTable *table)
1201{
1202 unsigned int count;
1203 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1204
1205 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1206 table->VddcLevelCount = data->vddc_voltage_table.count;
1207 for (count = 0; count < table->VddcLevelCount; count++) {
1208 table->VddcTable[count] =
1209 PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
1210 }
1211 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
1212 }
1213 return 0;
1214}
1215
1216/**
1217 * VddGfx table preparation for SMC.
1218 *
1219 * @param hwmgr the address of the hardware manager
1220 * @param table the SMC DPM table structure to be populated
1221 * @return always 0
1222 */
1223static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
1224 SMU72_Discrete_DpmTable *table)
1225{
1226 unsigned int count;
1227 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1228
1229 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1230 table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
1231 for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
1232 table->VddGfxTable[count] =
1233 PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
1234 }
1235 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
1236 }
1237 return 0;
1238}
1239
1240/**
1241 * Vddci table preparation for SMC.
1242 *
1243 * @param *hwmgr The address of the hardware manager.
1244 * @param *table The SMC DPM table structure to be populated.
1245 * @return 0
1246 */
1247static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
1248 SMU72_Discrete_DpmTable *table)
1249{
1250 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1251 uint32_t count;
1252
1253 table->VddciLevelCount = data->vddci_voltage_table.count;
1254 for (count = 0; count < table->VddciLevelCount; count++) {
1255 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
1256 table->VddciTable[count] =
1257 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1258 } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
1259 table->SmioTable1.Pattern[count].Voltage =
1260 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1261 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
1262 table->SmioTable1.Pattern[count].Smio =
1263 (uint8_t) count;
1264 table->Smio[count] |=
1265 data->vddci_voltage_table.entries[count].smio_low;
1266 table->VddciTable[count] =
1267 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1268 }
1269 }
1270
1271 table->SmioMask1 = data->vddci_voltage_table.mask_low;
1272 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
1273
1274 return 0;
1275}
1276
1277/**
1278 * Mvdd table preparation for SMC.
1279 *
1280 * @param *hwmgr The address of the hardware manager.
1281 * @param *table The SMC DPM table structure to be populated.
1282 * @return 0
1283 */
1284static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
1285 SMU72_Discrete_DpmTable *table)
1286{
1287 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1288 uint32_t count;
1289
1290 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1291 table->MvddLevelCount = data->mvdd_voltage_table.count;
1292 for (count = 0; count < table->MvddLevelCount; count++) {
1293 table->SmioTable2.Pattern[count].Voltage =
1294 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
1295 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
1296 table->SmioTable2.Pattern[count].Smio =
1297 (uint8_t) count;
1298 table->Smio[count] |=
1299 data->mvdd_voltage_table.entries[count].smio_low;
1300 }
1301 table->SmioMask2 = data->vddci_voltage_table.mask_low;
1302
1303 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
1304 }
1305
1306 return 0;
1307}
1308
1309/**
1310 * Convert a voltage value in mv unit to VID number required by SMU firmware
1311 */
1312static uint8_t convert_to_vid(uint16_t vddc)
1313{
1314 return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
1315}
1316
1317
1318/**
1319 * Preparation of vddc and vddgfx CAC tables for SMC.
1320 *
1321 * @param hwmgr the address of the hardware manager
1322 * @param table the SMC DPM table structure to be populated
1323 * @return always 0
1324 */
1325static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
1326 SMU72_Discrete_DpmTable *table)
1327{
1328 uint32_t count;
1329 uint8_t index;
1330 int result = 0;
1331 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1332 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1333 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
1334 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table;
1335
1336 /* pTables is already swapped, so in order to use the value from it, we need to swap it back. */
1337 uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
1338 uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
1339
1340 for (count = 0; count < vddcLevelCount; count++) {
1341 /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
1342 index = tonga_get_voltage_index(vddc_lookup_table,
1343 data->vddc_voltage_table.entries[count].value);
1344 table->BapmVddcVidLoSidd[count] =
1345 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
1346 table->BapmVddcVidHiSidd[count] =
1347 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
1348 table->BapmVddcVidHiSidd2[count] =
1349 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
1350 }
1351
1352 if ((data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2)) {
1353 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
1354 for (count = 0; count < vddgfxLevelCount; count++) {
1355 index = tonga_get_voltage_index(vddgfx_lookup_table,
1356 data->vddgfx_voltage_table.entries[count].value);
1357 table->BapmVddGfxVidLoSidd[count] =
1358 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
1359 table->BapmVddGfxVidHiSidd[count] =
1360 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
1361 table->BapmVddGfxVidHiSidd2[count] =
1362 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
1363 }
1364 } else {
1365 for (count = 0; count < vddcLevelCount; count++) {
1366 index = tonga_get_voltage_index(vddc_lookup_table,
1367 data->vddc_voltage_table.entries[count].value);
1368 table->BapmVddGfxVidLoSidd[count] =
1369 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
1370 table->BapmVddGfxVidHiSidd[count] =
1371 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
1372 table->BapmVddGfxVidHiSidd2[count] =
1373 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
1374 }
1375 }
1376
1377 return result;
1378}
1379
1380
/**
 * Preparation of voltage tables for SMC.
 *
 * Populates the VDDC, VDDCI, VDDGFX, MVDD and CAC tables of the SMC DPM
 * table by delegating to the per-rail populate helpers above.
 *
 * @param hwmgr the address of the hardware manager
 * @param table the SMC DPM table structure to be populated
 * @return 0 on success, -1 when any helper fails
 */

int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
	SMU72_Discrete_DpmTable *table)
{
	int result;

	result = tonga_populate_smc_vddc_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDC voltage table to SMC", return -1);

	result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDCI voltage table to SMC", return -1);

	result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDGFX voltage table to SMC", return -1);

	result = tonga_populate_smc_mvdd_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate MVDD voltage table to SMC", return -1);

	/* CAC tables depend on the level counts written above. */
	result = tonga_populate_cac_tables(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate CAC voltage tables to SMC", return -1);

	return 0;
}
1416
/**
 * Populates the SMC VRConfig field in DPM table.
 *
 * Encodes which voltage-regulator plane (or SMIO pattern) controls each
 * rail (VDDGFX, VDDC, VDDCI, MVDD) into table->VRConfig.
 *
 * @param hwmgr the address of the hardware manager
 * @param table the SMC DPM table structure to be populated
 * @return always 0
 */
static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
			SMU72_Discrete_DpmTable *table)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	uint16_t config;

	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* Splitted mode: VDDGFX on SVI2 plane 1, VDDC expected on plane 2. */
		config = VR_SVI2_PLANE_1;
		table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);

		if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
			config = VR_SVI2_PLANE_2;
			table->VRConfig |= config;
		} else {
			/* Misconfiguration: log it but continue with what we have. */
			printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should be both on SVI2 control in splitted mode! \n");
		}
	} else {
		/* Merged mode: VDDGFX shares the VDDC regulator. */
		config = VR_MERGED_WITH_VDDC;
		table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);

		/* Set Vddc Voltage Controller */
		if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
			config = VR_SVI2_PLANE_1;
			table->VRConfig |= config;
		} else {
			printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode! \n");
		}
	}

	/* Set Vddci Voltage Controller */
	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
		config = VR_SVI2_PLANE_2; /* only in merged mode */
		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
	} else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
		config = VR_SMIO_PATTERN_1;
		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
	}

	/* Set Mvdd Voltage Controller */
	if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		config = VR_SMIO_PATTERN_2;
		table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
	}

	return 0;
}
1472
1473static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
1474 phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
1475 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1476{
1477 uint32_t i = 0;
1478 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1479 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1480
1481 /* clock - voltage dependency table is empty table */
1482 if (allowed_clock_voltage_table->count == 0)
1483 return -1;
1484
1485 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1486 /* find first sclk bigger than request */
1487 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1488 voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1489 allowed_clock_voltage_table->entries[i].vddgfx);
1490
1491 voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1492 allowed_clock_voltage_table->entries[i].vddc);
1493
1494 if (allowed_clock_voltage_table->entries[i].vddci) {
1495 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1496 allowed_clock_voltage_table->entries[i].vddci);
1497 } else {
1498 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1499 allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
1500 }
1501
1502 if (allowed_clock_voltage_table->entries[i].mvdd) {
1503 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
1504 }
1505
1506 voltage->Phases = 1;
1507 return 0;
1508 }
1509 }
1510
1511 /* sclk is bigger than max sclk in the dependence table */
1512 voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1513 allowed_clock_voltage_table->entries[i-1].vddgfx);
1514 voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1515 allowed_clock_voltage_table->entries[i-1].vddc);
1516
1517 if (allowed_clock_voltage_table->entries[i-1].vddci) {
1518 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1519 allowed_clock_voltage_table->entries[i-1].vddci);
1520 }
1521 if (allowed_clock_voltage_table->entries[i-1].mvdd) {
1522 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
1523 }
1524
1525 return 0;
1526}
1527
1528/**
1529 * Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
1530 *
1531 * @param hwmgr the address of the powerplay hardware manager.
1532 * @return always 0
1533 */
1534int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
1535{
1536 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1;
1537}
1538
1539int tonga_populate_memory_timing_parameters(
1540 struct pp_hwmgr *hwmgr,
1541 uint32_t engine_clock,
1542 uint32_t memory_clock,
1543 struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
1544 )
1545{
1546 uint32_t dramTiming;
1547 uint32_t dramTiming2;
1548 uint32_t burstTime;
1549 int result;
1550
1551 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1552 engine_clock, memory_clock);
1553
1554 PP_ASSERT_WITH_CODE(result == 0,
1555 "Error calling VBIOS to set DRAM_TIMING.", return result);
1556
1557 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1558 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1559 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1560
1561 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1562 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1563 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1564
1565 return 0;
1566}
1567
1568/**
1569 * Setup parameters for the MC ARB.
1570 *
1571 * @param hwmgr the address of the powerplay hardware manager.
1572 * @return always 0
1573 * This function is to be called from the SetPowerState table.
1574 */
1575int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1576{
1577 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1578 int result = 0;
1579 SMU72_Discrete_MCArbDramTimingTable arb_regs;
1580 uint32_t i, j;
1581
1582 memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
1583
1584 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1585 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1586 result = tonga_populate_memory_timing_parameters
1587 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1588 data->dpm_table.mclk_table.dpm_levels[j].value,
1589 &arb_regs.entries[i][j]);
1590
1591 if (0 != result) {
1592 break;
1593 }
1594 }
1595 }
1596
1597 if (0 == result) {
1598 result = tonga_copy_bytes_to_smc(
1599 hwmgr->smumgr,
1600 data->arb_table_start,
1601 (uint8_t *)&arb_regs,
1602 sizeof(SMU72_Discrete_MCArbDramTimingTable),
1603 data->sram_end
1604 );
1605 }
1606
1607 return result;
1608}
1609
/*
 * Populate the SMC link (PCIe) level table from the PCIe speed DPM table
 * and record the resulting level count and enable mask.
 * Always returns 0.
 */
static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct tonga_dpm_table *dpm_table = &data->dpm_table;
	uint32_t i;

	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
	/* NOTE: '<=' is intentional — the loop fills one extra (boot-level) slot. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		/* param1 holds the raw lane width; encode it for the SMC. */
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity =
			1;
		table->LinkLevel[i].SPC =
			(uint8_t)(data->pcie_spc_cap & 0xff);
		/* Activity thresholds (5 down / 30 up) — driver defaults. */
		table->LinkLevel[i].DownThreshold =
			PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold =
			PP_HOST_TO_SMC_UL(30);
	}

	/* The count excludes the reserved boot-level slot written above. */
	data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
1639
/*
 * Populate the SMC UVD level table from the multimedia (MM) clock/voltage
 * dependency table: one level per MM entry, with VCLK/DCLK frequencies,
 * minimum voltages, and VBIOS-derived PLL dividers.
 * Returns 0 on success, or the failing divider-lookup result.
 */
static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
					SMU72_Discrete_DpmTable *table)
{
	int result = 0;

	uint8_t count;
	pp_atomctrl_clock_dividers_vi dividers;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;

	table->UvdLevelCount = (uint8_t) (mm_table->count);
	table->UvdBootLevel = 0;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
		table->UvdLevel[count].MinVoltage.Vddc =
			tonga_get_voltage_index(pptable_info->vddc_lookup_table,
						mm_table->entries[count].vddc);
		/* VddGfx index only matters in split (SVI2) mode; 0 otherwise. */
		table->UvdLevel[count].MinVoltage.VddGfx =
			(data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
			tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
						mm_table->entries[count].vddgfx) : 0;
		/* Vddci derived from vddc minus the configured vddc/vddci delta. */
		table->UvdLevel[count].MinVoltage.Vddci =
			tonga_get_voltage_id(&data->vddci_voltage_table,
					     mm_table->entries[count].vddc - data->vddc_vddci_delta);
		table->UvdLevel[count].MinVoltage.Phases = 1;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
							  table->UvdLevel[count].VclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				    "can not find divide id for Vclk clock", return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
							  table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				    "can not find divide id for Dclk clock", return result);

		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;

		/* Frequencies are uploaded in SMC (little-endian) byte order. */
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
	}

	return result;

}
1692
1693static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1694 SMU72_Discrete_DpmTable *table)
1695{
1696 int result = 0;
1697
1698 uint8_t count;
1699 pp_atomctrl_clock_dividers_vi dividers;
1700 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1701 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1702 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1703
1704 table->VceLevelCount = (uint8_t) (mm_table->count);
1705 table->VceBootLevel = 0;
1706
1707 for (count = 0; count < table->VceLevelCount; count++) {
1708 table->VceLevel[count].Frequency =
1709 mm_table->entries[count].eclk;
1710 table->VceLevel[count].MinVoltage.Vddc =
1711 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1712 mm_table->entries[count].vddc);
1713 table->VceLevel[count].MinVoltage.VddGfx =
1714 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1715 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1716 mm_table->entries[count].vddgfx) : 0;
1717 table->VceLevel[count].MinVoltage.Vddci =
1718 tonga_get_voltage_id(&data->vddci_voltage_table,
1719 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1720 table->VceLevel[count].MinVoltage.Phases = 1;
1721
1722 /* retrieve divider value for VBIOS */
1723 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1724 table->VceLevel[count].Frequency, &dividers);
1725 PP_ASSERT_WITH_CODE((0 == result),
1726 "can not find divide id for VCE engine clock", return result);
1727
1728 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1729
1730 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1731 }
1732
1733 return result;
1734}
1735
1736static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1737 SMU72_Discrete_DpmTable *table)
1738{
1739 int result = 0;
1740 uint8_t count;
1741 pp_atomctrl_clock_dividers_vi dividers;
1742 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1743 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1744 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1745
1746 table->AcpLevelCount = (uint8_t) (mm_table->count);
1747 table->AcpBootLevel = 0;
1748
1749 for (count = 0; count < table->AcpLevelCount; count++) {
1750 table->AcpLevel[count].Frequency =
1751 pptable_info->mm_dep_table->entries[count].aclk;
1752 table->AcpLevel[count].MinVoltage.Vddc =
1753 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1754 mm_table->entries[count].vddc);
1755 table->AcpLevel[count].MinVoltage.VddGfx =
1756 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1757 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1758 mm_table->entries[count].vddgfx) : 0;
1759 table->AcpLevel[count].MinVoltage.Vddci =
1760 tonga_get_voltage_id(&data->vddci_voltage_table,
1761 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1762 table->AcpLevel[count].MinVoltage.Phases = 1;
1763
1764 /* retrieve divider value for VBIOS */
1765 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1766 table->AcpLevel[count].Frequency, &dividers);
1767 PP_ASSERT_WITH_CODE((0 == result),
1768 "can not find divide id for engine clock", return result);
1769
1770 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1771
1772 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1773 }
1774
1775 return result;
1776}
1777
1778static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1779 SMU72_Discrete_DpmTable *table)
1780{
1781 int result = 0;
1782 uint8_t count;
1783 pp_atomctrl_clock_dividers_vi dividers;
1784 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1785 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1786 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1787
1788 table->SamuBootLevel = 0;
1789 table->SamuLevelCount = (uint8_t) (mm_table->count);
1790
1791 for (count = 0; count < table->SamuLevelCount; count++) {
1792 /* not sure whether we need evclk or not */
1793 table->SamuLevel[count].Frequency =
1794 pptable_info->mm_dep_table->entries[count].samclock;
1795 table->SamuLevel[count].MinVoltage.Vddc =
1796 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1797 mm_table->entries[count].vddc);
1798 table->SamuLevel[count].MinVoltage.VddGfx =
1799 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1800 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1801 mm_table->entries[count].vddgfx) : 0;
1802 table->SamuLevel[count].MinVoltage.Vddci =
1803 tonga_get_voltage_id(&data->vddci_voltage_table,
1804 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1805 table->SamuLevel[count].MinVoltage.Phases = 1;
1806
1807 /* retrieve divider value for VBIOS */
1808 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1809 table->SamuLevel[count].Frequency, &dividers);
1810 PP_ASSERT_WITH_CODE((0 == result),
1811 "can not find divide id for samu clock", return result);
1812
1813 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1814
1815 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1816 }
1817
1818 return result;
1819}
1820
1821/**
1822 * Populates the SMC MCLK structure using the provided memory clock
1823 *
1824 * @param hwmgr the address of the hardware manager
1825 * @param memory_clock the memory clock to use to populate the structure
1826 * @param sclk the SMC SCLK structure to be populated
1827 */
1828static int tonga_calculate_mclk_params(
1829 struct pp_hwmgr *hwmgr,
1830 uint32_t memory_clock,
1831 SMU72_Discrete_MemoryLevel *mclk,
1832 bool strobe_mode,
1833 bool dllStateOn
1834 )
1835{
1836 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1837 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1838 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1839 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1840 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1841 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1842 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1843 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1844 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1845 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1846
1847 pp_atomctrl_memory_clock_param mpll_param;
1848 int result;
1849
1850 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1851 memory_clock, &mpll_param, strobe_mode);
1852 PP_ASSERT_WITH_CODE(0 == result,
1853 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1854
1855 /* MPLL_FUNC_CNTL setup*/
1856 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1857
1858 /* MPLL_FUNC_CNTL_1 setup*/
1859 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1860 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1861 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1862 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1863 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1864 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1865
1866 /* MPLL_AD_FUNC_CNTL setup*/
1867 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1868 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1869
1870 if (data->is_memory_GDDR5) {
1871 /* MPLL_DQ_FUNC_CNTL setup*/
1872 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1873 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1874 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1875 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1876 }
1877
1878 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1879 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1880 /*
1881 ************************************
1882 Fref = Reference Frequency
1883 NF = Feedback divider ratio
1884 NR = Reference divider ratio
1885 Fnom = Nominal VCO output frequency = Fref * NF / NR
1886 Fs = Spreading Rate
1887 D = Percentage down-spread / 2
1888 Fint = Reference input frequency to PFD = Fref / NR
1889 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
1890 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
1891 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
1892 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
1893 *************************************
1894 */
1895 pp_atomctrl_internal_ss_info ss_info;
1896 uint32_t freq_nom;
1897 uint32_t tmp;
1898 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1899
1900 /* for GDDR5 for all modes and DDR3 */
1901 if (1 == mpll_param.qdr)
1902 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1903 else
1904 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
1905
1906 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1907 tmp = (freq_nom / reference_clock);
1908 tmp = tmp * tmp;
1909
1910 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1911 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
1912 /* ss.Info.speed_spectrum_rate -- in unit of khz */
1913 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
1914 /* = reference_clock * 5 / speed_spectrum_rate */
1915 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1916
1917 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
1918 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
1919 uint32_t clkv =
1920 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1921 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1922
1923 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1924 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1925 }
1926 }
1927
1928 /* MCLK_PWRMGT_CNTL setup */
1929 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1930 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1931 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1932 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1933 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1934 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1935
1936
1937 /* Save the result data to outpupt memory level structure */
1938 mclk->MclkFrequency = memory_clock;
1939 mclk->MpllFuncCntl = mpll_func_cntl;
1940 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1941 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1942 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1943 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1944 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1945 mclk->DllCntl = dll_cntl;
1946 mclk->MpllSs1 = mpll_ss1;
1947 mclk->MpllSs2 = mpll_ss2;
1948
1949 return 0;
1950}
1951
/*
 * Map a memory clock (10 kHz units) to the 4-bit MC frequency-ratio index.
 * Strobe and non-strobe modes use different clamp bounds and step sizes.
 */
static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint32_t lo_clamp, hi_clamp, base, step;

	if (strobe_mode) {
		lo_clamp = 12500;
		hi_clamp = 47500;
		base = 10000;
		step = 2500;
	} else {
		lo_clamp = 65000;
		hi_clamp = 135000;
		base = 60000;
		step = 5000;
	}

	if (memory_clock < lo_clamp)
		return 0x00;
	if (memory_clock > hi_clamp)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
1977
/*
 * Map a DDR3 memory clock (10 kHz units) to the 4-bit MC
 * frequency-ratio index: 0 below 100 MHz, 0x0f at/above 800 MHz,
 * otherwise one step per 50 MHz starting at index 1.
 */
static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (uint8_t)(1 + (memory_clock - 10000) / 5000);
}
1992
/*
 * Populate one SMC memory (MCLK) DPM level for the given memory clock:
 * look up the minimum voltages from the pptable, decide stutter/strobe/EDC
 * modes and the DLL state, compute the MPLL register values, and byte-swap
 * the result for the SMC.  Returns 0 on success or the first failing
 * sub-call's error code.
 */
static int tonga_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *memory_level
		)
{
	uint32_t minMvdd = 0;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	bool dllStateOn;
	struct cgs_display_info info = {0};


	if (NULL != pptable_info->vdd_dep_on_mclk) {
		result = tonga_get_dependecy_volt_by_clk(hwmgr,
			pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	/* No MVDD control: fall back to the bootup MVDD value. */
	if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) {
		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
	} else {
		memory_level->MinMvdd = minMvdd;
	}
	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* Stutter only below the threshold, with UVD off and (on Linux)
	 * with stutter enabled in hardware and 1-2 active displays. */
	if ((data->mclk_stutter_mode_threshold != 0) &&
	    (memory_clock <= data->mclk_stutter_mode_threshold) &&
	    (data->is_uvd_enabled == 0)
#if defined(LINUX)
	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	    && (data->display_timing.num_existing_displays <= 2)
	    && (data->display_timing.num_existing_displays != 0)
#endif
	)
		memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= data->mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_GDDR5) {
		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((data->mclk_edc_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((data->mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		/* DLL state comes from fuse bits in MC_SEQ_MISC5/6, selected
		 * by comparing the strobe ratio against MC_SEQ_MISC7[19:16]. */
		if (memory_level->StrobeEnable) {
			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dllStateOn = data->dll_defaule_on;
		}
	} else {
		memory_level->StrobeRatio =
			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
		dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = tonga_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);

	/* Byte-swap the level for the SMC only if everything succeeded. */
	if (0 == result) {
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
2108
2109/**
2110 * Populates the SMC MVDD structure using the provided memory clock.
2111 *
2112 * @param hwmgr the address of the hardware manager
2113 * @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
2114 * @param voltage the SMC VOLTAGE structure to be populated
2115 */
2116int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern)
2117{
2118 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2119 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2120 uint32_t i = 0;
2121
2122 if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
2123 /* find mvdd value which clock is more than request */
2124 for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) {
2125 if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) {
2126 /* Always round to higher voltage. */
2127 smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
2128 break;
2129 }
2130 }
2131
2132 PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
2133 "MVDD Voltage is outside the supported range.", return -1);
2134
2135 } else {
2136 return -1;
2137 }
2138
2139 return 0;
2140}
2141
2142
/*
 * Populate the SMC ACPI level (the lowest-power state the SMC can select):
 * SCLK at the reference clock with the SPLL powered down/reset, and MCLK
 * with the DLLs reset, powered down and bypassed.  All fields are
 * byte-swapped for the SMC before returning.
 */
static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr,
			SMU72_Discrete_DpmTable *table)
{
	int result = 0;
	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	SMIO_Pattern voltage_level;
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;

	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;

	/* ACPI SCLK runs at the reference clock frequency. */
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset while ACPI is active. */
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;

	/*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/

	if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
2269
2270static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
2271{
2272 int result = 0;
2273 uint32_t i;
2274
2275 for (i = 0; i < table->count; i++) {
2276 if (value == table->dpm_levels[i].value) {
2277 *boot_level = i;
2278 result = 0;
2279 }
2280 }
2281 return result;
2282}
2283
2284static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
2285 SMU72_Discrete_DpmTable *table)
2286{
2287 int result = 0;
2288 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2289
2290 table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */
2291 table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */
2292
2293 /* find boot level from dpm table*/
2294 result = tonga_find_boot_level(&(data->dpm_table.sclk_table),
2295 data->vbios_boot_state.sclk_bootup_value,
2296 (uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
2297
2298 if (0 != result) {
2299 data->smc_state_table.GraphicsBootLevel = 0;
2300 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
2301 in dependency table. Using Graphics DPM level 0!");
2302 result = 0;
2303 }
2304
2305 result = tonga_find_boot_level(&(data->dpm_table.mclk_table),
2306 data->vbios_boot_state.mclk_bootup_value,
2307 (uint32_t *)&(data->smc_state_table.MemoryBootLevel));
2308
2309 if (0 != result) {
2310 data->smc_state_table.MemoryBootLevel = 0;
2311 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
2312 in dependency table. Using Memory DPM level 0!");
2313 result = 0;
2314 }
2315
2316 table->BootVoltage.Vddc =
2317 tonga_get_voltage_id(&(data->vddc_voltage_table),
2318 data->vbios_boot_state.vddc_bootup_value);
2319 table->BootVoltage.VddGfx =
2320 tonga_get_voltage_id(&(data->vddgfx_voltage_table),
2321 data->vbios_boot_state.vddgfx_bootup_value);
2322 table->BootVoltage.Vddci =
2323 tonga_get_voltage_id(&(data->vddci_voltage_table),
2324 data->vbios_boot_state.vddci_bootup_value);
2325 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
2326
2327 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
2328
2329 return result;
2330}
2331
2332
2333/**
2334 * Calculates the SCLK dividers using the provided engine clock
2335 *
2336 * @param hwmgr the address of the hardware manager
2337 * @param engine_clock the engine clock to use to populate the structure
2338 * @param sclk the SMC SCLK structure to be populated
2339 */
2340int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
2341 uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
2342{
2343 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2344 pp_atomctrl_clock_dividers_vi dividers;
2345 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
2346 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
2347 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
2348 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
2349 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
2350 uint32_t reference_clock;
2351 uint32_t reference_divider;
2352 uint32_t fbdiv;
2353 int result;
2354
2355 /* get the engine clock dividers for this clock value*/
2356 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
2357
2358 PP_ASSERT_WITH_CODE(result == 0,
2359 "Error retrieving Engine Clock dividers from VBIOS.", return result);
2360
2361 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
2362 reference_clock = atomctrl_get_reference_clock(hwmgr);
2363
2364 reference_divider = 1 + dividers.uc_pll_ref_div;
2365
2366 /* low 14 bits is fraction and high 12 bits is divider*/
2367 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
2368
2369 /* SPLL_FUNC_CNTL setup*/
2370 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2371 CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
2372 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2373 CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
2374
2375 /* SPLL_FUNC_CNTL_3 setup*/
2376 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
2377 CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
2378
2379 /* set to use fractional accumulation*/
2380 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
2381 CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
2382
2383 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2384 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
2385 pp_atomctrl_internal_ss_info ss_info;
2386
2387 uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
2388 if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
2389 /*
2390 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
2391 * ss_info.speed_spectrum_rate -- in unit of khz
2392 */
2393 /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
2394 uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
2395
2396 /* clkv = 2 * D * fbdiv / NS */
2397 uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
2398
2399 cg_spll_spread_spectrum =
2400 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
2401 cg_spll_spread_spectrum =
2402 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
2403 cg_spll_spread_spectrum_2 =
2404 PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
2405 }
2406 }
2407
2408 sclk->SclkFrequency = engine_clock;
2409 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2410 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2411 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2412 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2413 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
2414
2415 return 0;
2416}
2417
2418/**
2419 * Populates single SMC SCLK structure using the provided engine clock
2420 *
2421 * @param hwmgr the address of the hardware manager
2422 * @param engine_clock the engine clock to use to populate the structure
2423 * @param sclk the SMC SCLK structure to be populated
2424 */
2425static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level)
2426{
2427 int result;
2428 uint32_t threshold;
2429 uint32_t mvdd;
2430 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2431 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2432
2433 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
2434
2435
2436 /* populate graphics levels*/
2437 result = tonga_get_dependecy_volt_by_clk(hwmgr,
2438 pptable_info->vdd_dep_on_sclk, engine_clock,
2439 &graphic_level->MinVoltage, &mvdd);
2440 PP_ASSERT_WITH_CODE((0 == result),
2441 "can not find VDDC voltage value for VDDC \
2442 engine clock dependency table", return result);
2443
2444 /* SCLK frequency in units of 10KHz*/
2445 graphic_level->SclkFrequency = engine_clock;
2446
2447 /* Indicates maximum activity level for this performance level. 50% for now*/
2448 graphic_level->ActivityLevel = sclk_activity_level_threshold;
2449
2450 graphic_level->CcPwrDynRm = 0;
2451 graphic_level->CcPwrDynRm1 = 0;
2452 /* this level can be used if activity is high enough.*/
2453 graphic_level->EnabledForActivity = 0;
2454 /* this level can be used for throttling.*/
2455 graphic_level->EnabledForThrottle = 1;
2456 graphic_level->UpHyst = 0;
2457 graphic_level->DownHyst = 0;
2458 graphic_level->VoltageDownHyst = 0;
2459 graphic_level->PowerThrottle = 0;
2460
2461 threshold = engine_clock * data->fast_watemark_threshold / 100;
2462/*
2463 *get the DAL clock. do it in funture.
2464 PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
2465 data->display_timing.min_clock_insr = minClocks.engineClockInSR;
2466
2467 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
2468 {
2469 graphic_level->DeepSleepDivId = PhwTonga_GetSleepDividerIdFromClock(hwmgr, engine_clock, minClocks.engineClockInSR);
2470 }
2471*/
2472
2473 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
2474 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2475
2476 if (0 == result) {
2477 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
2478 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
2479 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
2480 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
2481 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
2482 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
2483 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
2484 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
2485 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
2486 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
2487 }
2488
2489 return result;
2490}
2491
2492/**
2493 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
2494 *
2495 * @param hwmgr the address of the hardware manager
2496 */
2497static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
2498{
2499 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2500 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2501 struct tonga_dpm_table *dpm_table = &data->dpm_table;
2502 phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
2503 uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
2504 int result = 0;
2505 uint32_t level_array_adress = data->dpm_table_start +
2506 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
2507 uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
2508 SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/
2509 SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
2510 uint32_t i, maxEntry;
2511 uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
2512 PECI_RegistryValue reg_value;
2513 memset(levels, 0x00, level_array_size);
2514
2515 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2516 result = tonga_populate_single_graphic_level(hwmgr,
2517 dpm_table->sclk_table.dpm_levels[i].value,
2518 (uint16_t)data->activity_target[i],
2519 &(data->smc_state_table.GraphicsLevel[i]));
2520
2521 if (0 != result)
2522 return result;
2523
2524 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
2525 if (i > 1)
2526 data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
2527
2528 if (0 == i) {
2529 reg_value = 0;
2530 if (reg_value != 0)
2531 data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value;
2532 }
2533
2534 if (1 == i) {
2535 reg_value = 0;
2536 if (reg_value != 0)
2537 data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value;
2538 }
2539 }
2540
2541 /* Only enable level 0 for now. */
2542 data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
2543
2544 /* set highest level watermark to high */
2545 if (dpm_table->sclk_table.count > 1)
2546 data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
2547 PPSMC_DISPLAY_WATERMARK_HIGH;
2548
2549 data->smc_state_table.GraphicsDpmLevelCount =
2550 (uint8_t)dpm_table->sclk_table.count;
2551 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
2552 tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2553
2554 if (pcie_table != NULL) {
2555 PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
2556 "There must be 1 or more PCIE levels defined in PPTable.", return -1);
2557 maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
2558 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2559 data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
2560 (uint8_t) ((i < maxEntry) ? i : maxEntry);
2561 }
2562 } else {
2563 if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
2564 printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!");
2565
2566 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2567 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2568 (1<<(highest_pcie_level_enabled+1))) != 0)) {
2569 highest_pcie_level_enabled++;
2570 }
2571
2572 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2573 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2574 (1<<lowest_pcie_level_enabled)) == 0)) {
2575 lowest_pcie_level_enabled++;
2576 }
2577
2578 while ((count < highest_pcie_level_enabled) &&
2579 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2580 (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
2581 count++;
2582 }
2583 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
2584 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
2585
2586
2587 /* set pcieDpmLevel to highest_pcie_level_enabled*/
2588 for (i = 2; i < dpm_table->sclk_table.count; i++) {
2589 data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
2590 }
2591
2592 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
2593 data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
2594
2595 /* set pcieDpmLevel to mid_pcie_level_enabled*/
2596 data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
2597 }
2598 /* level count will send to smc once at init smc table and never change*/
2599 result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2600
2601 if (0 != result)
2602 return result;
2603
2604 return 0;
2605}
2606
2607/**
2608 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
2609 *
2610 * @param hwmgr the address of the hardware manager
2611 */
2612
2613static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
2614{
2615 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2616 struct tonga_dpm_table *dpm_table = &data->dpm_table;
2617 int result;
2618 /* populate MCLK dpm table to SMU7 */
2619 uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
2620 uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
2621 SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
2622 uint32_t i;
2623
2624 memset(levels, 0x00, level_array_size);
2625
2626 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2627 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
2628 "can not populate memory level as memory clock is zero", return -1);
2629 result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
2630 &(data->smc_state_table.MemoryLevel[i]));
2631 if (0 != result) {
2632 return result;
2633 }
2634 }
2635
2636 /* Only enable level 0 for now.*/
2637 data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
2638
2639 /*
2640 * in order to prevent MC activity from stutter mode to push DPM up.
2641 * the UVD change complements this by putting the MCLK in a higher state
2642 * by default such that we are not effected by up threshold or and MCLK DPM latency.
2643 */
2644 data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
2645 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
2646
2647 data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
2648 data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2649 /* set highest level watermark to high*/
2650 data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
2651
2652 /* level count will send to smc once at init smc table and never change*/
2653 result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
2654 level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2655
2656 if (0 != result) {
2657 return result;
2658 }
2659
2660 return 0;
2661}
2662
/* One DLL speed lookup entry: maps a memory data-rate range to the DLL_SPEED value to program. */
struct TONGA_DLL_SPEED_SETTING {
	uint16_t Min; /* Minimum Data Rate*/
	uint16_t Max; /* Maximum Data Rate*/
	uint32_t dll_speed; /* The desired DLL_SPEED setting*/
};
2668
/*
 * Clock-stretcher data table population is not implemented for Tonga;
 * this stub reports success so tonga_init_smc_table() can proceed when
 * PHM_PlatformCaps_ClockStretcher is enabled.
 */
static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	return 0;
}
2673
2674/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
2675
2676
2677static int tonga_reset_single_dpm_table(
2678 struct pp_hwmgr *hwmgr,
2679 struct tonga_single_dpm_table *dpm_table,
2680 uint32_t count)
2681{
2682 uint32_t i;
2683 if (!(count <= MAX_REGULAR_DPM_NUMBER))
2684 printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \
2685 table entries to exceed max number! \n");
2686
2687 dpm_table->count = count;
2688 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
2689 dpm_table->dpm_levels[i].enabled = 0;
2690 }
2691
2692 return 0;
2693}
2694
2695static void tonga_setup_pcie_table_entry(
2696 struct tonga_single_dpm_table *dpm_table,
2697 uint32_t index, uint32_t pcie_gen,
2698 uint32_t pcie_lanes)
2699{
2700 dpm_table->dpm_levels[index].value = pcie_gen;
2701 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2702 dpm_table->dpm_levels[index].enabled = 1;
2703}
2704
2705static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
2706{
2707 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2708 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2709 phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
2710 uint32_t i, maxEntry;
2711
2712 if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
2713 data->pcie_gen_power_saving = data->pcie_gen_performance;
2714 data->pcie_lane_power_saving = data->pcie_lane_performance;
2715 } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
2716 data->pcie_gen_performance = data->pcie_gen_power_saving;
2717 data->pcie_lane_performance = data->pcie_lane_power_saving;
2718 }
2719
2720 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK);
2721
2722 if (pcie_table != NULL) {
2723 /*
2724 * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue).
2725 * If PCIE table from PPTable have ULV entry + 8 entries, then ignore the last entry.
2726 */
2727 maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ?
2728 SMU72_MAX_LEVELS_LINK : pcie_table->count;
2729 for (i = 1; i < maxEntry; i++) {
2730 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1,
2731 get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed),
2732 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2733 }
2734 data->dpm_table.pcie_speed_table.count = maxEntry - 1;
2735 } else {
2736 /* Hardcode Pcie Table */
2737 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
2738 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2739 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2740 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
2741 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2742 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2743 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
2744 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2745 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2746 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
2747 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2748 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2749 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
2750 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2751 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2752 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
2753 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2754 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2755 data->dpm_table.pcie_speed_table.count = 6;
2756 }
2757 /* Populate last level for boot PCIE level, but do not increment count. */
2758 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
2759 data->dpm_table.pcie_speed_table.count,
2760 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2761 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2762
2763 return 0;
2764
2765}
2766
2767/*
2768 * This function is to initalize all DPM state tables for SMU7 based on the dependency table.
2769 * Dynamic state patching function will then trim these state tables to the allowed range based
2770 * on the power policy or external client requests, such as UVD request, etc.
2771 */
2772static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
2773{
2774 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2775 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2776 uint32_t i;
2777
2778 phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table =
2779 pptable_info->vdd_dep_on_sclk;
2780 phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table =
2781 pptable_info->vdd_dep_on_mclk;
2782
2783 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
2784 "SCLK dependency table is missing. This table is mandatory", return -1);
2785 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
2786 "SCLK dependency table has to have is missing. This table is mandatory", return -1);
2787
2788 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
2789 "MCLK dependency table is missing. This table is mandatory", return -1);
2790 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
2791 "VMCLK dependency table has to have is missing. This table is mandatory", return -1);
2792
2793 /* clear the state table to reset everything to default */
2794 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
2795 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS);
2796 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY);
2797 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */
2798 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/
2799 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/
2800 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/
2801
2802 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
2803 "SCLK dependency table is missing. This table is mandatory", return -1);
2804 /* Initialize Sclk DPM table based on allow Sclk values*/
2805 data->dpm_table.sclk_table.count = 0;
2806
2807 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
2808 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
2809 allowed_vdd_sclk_table->entries[i].clk) {
2810 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
2811 allowed_vdd_sclk_table->entries[i].clk;
2812 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
2813 data->dpm_table.sclk_table.count++;
2814 }
2815 }
2816
2817 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
2818 "MCLK dependency table is missing. This table is mandatory", return -1);
2819 /* Initialize Mclk DPM table based on allow Mclk values */
2820 data->dpm_table.mclk_table.count = 0;
2821 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
2822 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
2823 allowed_vdd_mclk_table->entries[i].clk) {
2824 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
2825 allowed_vdd_mclk_table->entries[i].clk;
2826 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
2827 data->dpm_table.mclk_table.count++;
2828 }
2829 }
2830
2831 /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
2832 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
2833 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
2834 /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
2835 /* param1 is for corresponding std voltage */
2836 data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
2837 }
2838 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
2839
2840 if (NULL != allowed_vdd_mclk_table) {
2841 /* Initialize Vddci DPM table based on allow Mclk values */
2842 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
2843 data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
2844 data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
2845 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
2846 data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
2847 }
2848 data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
2849 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
2850 }
2851
2852 /* setup PCIE gen speed levels*/
2853 tonga_setup_default_pcie_tables(hwmgr);
2854
2855 /* save a copy of the default DPM table*/
2856 memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table));
2857
2858 return 0;
2859}
2860
2861int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
2862 const struct tonga_power_state *bootState)
2863{
2864 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2865 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2866 uint8_t count, level;
2867
2868 count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count);
2869 for (level = 0; level < count; level++) {
2870 if (pptable_info->vdd_dep_on_sclk->entries[level].clk >=
2871 bootState->performance_levels[0].engine_clock) {
2872 data->smc_state_table.GraphicsBootLevel = level;
2873 break;
2874 }
2875 }
2876
2877 count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count);
2878 for (level = 0; level < count; level++) {
2879 if (pptable_info->vdd_dep_on_mclk->entries[level].clk >=
2880 bootState->performance_levels[0].memory_clock) {
2881 data->smc_state_table.MemoryBootLevel = level;
2882 break;
2883 }
2884 }
2885
2886 return 0;
2887}
2888
2889/**
2890 * Initializes the SMC table and uploads it
2891 *
2892 * @param hwmgr the address of the powerplay hardware manager.
2893 * @param pInput the pointer to input data (PowerState)
2894 * @return always 0
2895 */
2896int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2897{
2898 int result;
2899 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2900 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2901 SMU72_Discrete_DpmTable *table = &(data->smc_state_table);
2902 const phw_tonga_ulv_parm *ulv = &(data->ulv);
2903 uint8_t i;
2904 PECI_RegistryValue reg_value;
2905 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2906
2907 result = tonga_setup_default_dpm_tables(hwmgr);
2908 PP_ASSERT_WITH_CODE(0 == result,
2909 "Failed to setup default DPM tables!", return result;);
2910 memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
2911 if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) {
2912 tonga_populate_smc_voltage_tables(hwmgr, table);
2913 }
2914
2915 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2916 PHM_PlatformCaps_AutomaticDCTransition)) {
2917 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2918 }
2919
2920 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2921 PHM_PlatformCaps_StepVddc)) {
2922 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2923 }
2924
2925 if (data->is_memory_GDDR5) {
2926 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2927 }
2928
2929 i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
2930
2931 if (i == 1 || i == 0) {
2932 table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL;
2933 }
2934
2935 if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) {
2936 PP_ASSERT_WITH_CODE(0 == result,
2937 "Failed to initialize ULV state!", return result;);
2938
2939 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2940 ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
2941 }
2942
2943 result = tonga_populate_smc_link_level(hwmgr, table);
2944 PP_ASSERT_WITH_CODE(0 == result,
2945 "Failed to initialize Link Level!", return result;);
2946
2947 result = tonga_populate_all_graphic_levels(hwmgr);
2948 PP_ASSERT_WITH_CODE(0 == result,
2949 "Failed to initialize Graphics Level!", return result;);
2950
2951 result = tonga_populate_all_memory_levels(hwmgr);
2952 PP_ASSERT_WITH_CODE(0 == result,
2953 "Failed to initialize Memory Level!", return result;);
2954
2955 result = tonga_populate_smv_acpi_level(hwmgr, table);
2956 PP_ASSERT_WITH_CODE(0 == result,
2957 "Failed to initialize ACPI Level!", return result;);
2958
2959 result = tonga_populate_smc_vce_level(hwmgr, table);
2960 PP_ASSERT_WITH_CODE(0 == result,
2961 "Failed to initialize VCE Level!", return result;);
2962
2963 result = tonga_populate_smc_acp_level(hwmgr, table);
2964 PP_ASSERT_WITH_CODE(0 == result,
2965 "Failed to initialize ACP Level!", return result;);
2966
2967 result = tonga_populate_smc_samu_level(hwmgr, table);
2968 PP_ASSERT_WITH_CODE(0 == result,
2969 "Failed to initialize SAMU Level!", return result;);
2970
2971 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2972 /* need to populate the ARB settings for the initial state. */
2973 result = tonga_program_memory_timing_parameters(hwmgr);
2974 PP_ASSERT_WITH_CODE(0 == result,
2975 "Failed to Write ARB settings for the initial state.", return result;);
2976
2977 result = tonga_populate_smc_uvd_level(hwmgr, table);
2978 PP_ASSERT_WITH_CODE(0 == result,
2979 "Failed to initialize UVD Level!", return result;);
2980
2981 result = tonga_populate_smc_boot_level(hwmgr, table);
2982 PP_ASSERT_WITH_CODE(0 == result,
2983 "Failed to initialize Boot Level!", return result;);
2984
2985 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2986 PHM_PlatformCaps_ClockStretcher)) {
2987 result = tonga_populate_clock_stretcher_data_table(hwmgr);
2988 PP_ASSERT_WITH_CODE(0 == result,
2989 "Failed to populate Clock Stretcher Data Table!", return result;);
2990 }
2991 table->GraphicsVoltageChangeEnable = 1;
2992 table->GraphicsThermThrottleEnable = 1;
2993 table->GraphicsInterval = 1;
2994 table->VoltageInterval = 1;
2995 table->ThermalInterval = 1;
2996 table->TemperatureLimitHigh =
2997 pptable_info->cac_dtp_table->usTargetOperatingTemp *
2998 TONGA_Q88_FORMAT_CONVERSION_UNIT;
2999 table->TemperatureLimitLow =
3000 (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) *
3001 TONGA_Q88_FORMAT_CONVERSION_UNIT;
3002 table->MemoryVoltageChangeEnable = 1;
3003 table->MemoryInterval = 1;
3004 table->VoltageResponseTime = 0;
3005 table->PhaseResponseTime = 0;
3006 table->MemoryThermThrottleEnable = 1;
3007
3008 /*
3009 * Cail reads current link status and reports it as cap (we cannot change this due to some previous issues we had)
3010 * SMC drops the link status to lowest level after enabling DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
3011 * but this time Cail reads current link status which was set to low by SMC and reports it as cap to powerplay
3012 * To avoid it, we set PCIeBootLinkLevel to highest dpm level
3013 */
3014 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
3015 "There must be 1 or more PCIE levels defined in PPTable.",
3016 return -1);
3017
3018 table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
3019
3020 table->PCIeGenInterval = 1;
3021
3022 result = tonga_populate_vr_config(hwmgr, table);
3023 PP_ASSERT_WITH_CODE(0 == result,
3024 "Failed to populate VRConfig setting!", return result);
3025
3026 table->ThermGpio = 17;
3027 table->SclkStepSize = 0x4000;
3028
3029 reg_value = 0;
3030 if ((0 == reg_value) &&
3031 (0 == atomctrl_get_pp_assign_pin(hwmgr,
3032 VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) {
3033 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3034 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3035 PHM_PlatformCaps_RegulatorHot);
3036 } else {
3037 table->VRHotGpio = TONGA_UNUSED_GPIO_PIN;
3038 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3039 PHM_PlatformCaps_RegulatorHot);
3040 }
3041
3042 /* ACDC Switch GPIO */
3043 reg_value = 0;
3044 if ((0 == reg_value) &&
3045 (0 == atomctrl_get_pp_assign_pin(hwmgr,
3046 PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) {
3047 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3048 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3049 PHM_PlatformCaps_AutomaticDCTransition);
3050 } else {
3051 table->AcDcGpio = TONGA_UNUSED_GPIO_PIN;
3052 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3053 PHM_PlatformCaps_AutomaticDCTransition);
3054 }
3055
3056 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3057 PHM_PlatformCaps_Falcon_QuickTransition);
3058
3059 reg_value = 0;
3060 if (1 == reg_value) {
3061 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3062 PHM_PlatformCaps_AutomaticDCTransition);
3063 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3064 PHM_PlatformCaps_Falcon_QuickTransition);
3065 }
3066
3067 reg_value = 0;
3068 if ((0 == reg_value) &&
3069 (0 == atomctrl_get_pp_assign_pin(hwmgr,
3070 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
3071 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3072 PHM_PlatformCaps_ThermalOutGPIO);
3073
3074 table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3075
3076 table->ThermOutPolarity =
3077 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
3078 (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0;
3079
3080 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
3081
3082 /* if required, combine VRHot/PCC with thermal out GPIO*/
3083 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3084 PHM_PlatformCaps_RegulatorHot) &&
3085 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3086 PHM_PlatformCaps_CombinePCCWithThermalSignal)){
3087 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
3088 }
3089 } else {
3090 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3091 PHM_PlatformCaps_ThermalOutGPIO);
3092
3093 table->ThermOutGpio = 17;
3094 table->ThermOutPolarity = 1;
3095 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
3096 }
3097
3098 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) {
3099 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
3100 }
3101 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
3102 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
3103 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
3104 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
3105 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
3106 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
3107 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
3108 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
3109 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
3110
3111 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
3112 result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
3113 offsetof(SMU72_Discrete_DpmTable, SystemFlags),
3114 (uint8_t *)&(table->SystemFlags),
3115 sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController),
3116 data->sram_end);
3117
3118 PP_ASSERT_WITH_CODE(0 == result,
3119 "Failed to upload dpm data to SMC memory!", return result;);
3120
3121 return result;
3122}
3123
/* Look up the voltage based on DAL's requested level, and then send the requested VDDC voltage to SMC */
/* NOTE(review): currently a no-op stub — the DAL minimum-voltage request is not forwarded to the SMC yet. */
static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
{
	return;
}
3129
3130int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3131{
3132 PPSMC_Result result;
3133 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3134
3135 /* Apply minimum voltage based on DAL's request level */
3136 tonga_apply_dal_minimum_voltage_request(hwmgr);
3137
3138 if (0 == data->sclk_dpm_key_disabled) {
3139 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3140 if (0 != tonga_is_dpm_running(hwmgr))
3141 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3142
3143 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3144 result = smum_send_msg_to_smc_with_parameter(
3145 hwmgr->smumgr,
3146 (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
3147 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3148 PP_ASSERT_WITH_CODE((0 == result),
3149 "Set Sclk Dpm enable Mask failed", return -1);
3150 }
3151 }
3152
3153 if (0 == data->mclk_dpm_key_disabled) {
3154 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3155 if (0 != tonga_is_dpm_running(hwmgr))
3156 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3157
3158 if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3159 result = smum_send_msg_to_smc_with_parameter(
3160 hwmgr->smumgr,
3161 (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
3162 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3163 PP_ASSERT_WITH_CODE((0 == result),
3164 "Set Mclk Dpm enable Mask failed", return -1);
3165 }
3166 }
3167
3168 return 0;
3169}
3170
3171
3172int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr)
3173{
3174 uint32_t level, tmp;
3175 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3176
3177 if (0 == data->pcie_dpm_key_disabled) {
3178 /* PCIE */
3179 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
3180 level = 0;
3181 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3182 while (tmp >>= 1)
3183 level++ ;
3184
3185 if (0 != level) {
3186 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
3187 "force highest pcie dpm state failed!", return -1);
3188 }
3189 }
3190 }
3191
3192 if (0 == data->sclk_dpm_key_disabled) {
3193 /* SCLK */
3194 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
3195 level = 0;
3196 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3197 while (tmp >>= 1)
3198 level++ ;
3199
3200 if (0 != level) {
3201 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
3202 "force highest sclk dpm state failed!", return -1);
3203 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
3204 CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
3205 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3206 Curr_Sclk_Index does not match the level \n");
3207
3208 }
3209 }
3210 }
3211
3212 if (0 == data->mclk_dpm_key_disabled) {
3213 /* MCLK */
3214 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
3215 level = 0;
3216 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3217 while (tmp >>= 1)
3218 level++ ;
3219
3220 if (0 != level) {
3221 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
3222 "force highest mclk dpm state failed!", return -1);
3223 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3224 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
3225 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3226 Curr_Mclk_Index does not match the level \n");
3227 }
3228 }
3229 }
3230
3231 return 0;
3232}
3233
3234/**
3235 * Find the MC microcode version and store it in the HwMgr struct
3236 *
3237 * @param hwmgr the address of the powerplay hardware manager.
3238 * @return always 0
3239 */
3240int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
3241{
3242 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
3243
3244 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
3245
3246 return 0;
3247}
3248
3249/**
3250 * Initialize Dynamic State Adjustment Rule Settings
3251 *
3252 * @param hwmgr the address of the powerplay hardware manager.
3253 */
3254int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
3255{
3256 uint32_t table_size;
3257 struct phm_clock_voltage_dependency_table *table_clk_vlt;
3258 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3259
3260 hwmgr->dyn_state.mclk_sclk_ratio = 4;
3261 hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
3262 hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
3263
3264 /* initialize vddc_dep_on_dal_pwrl table */
3265 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
3266 table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
3267
3268 if (NULL == table_clk_vlt) {
3269 printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
3270 return -ENOMEM;
3271 } else {
3272 table_clk_vlt->count = 4;
3273 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
3274 table_clk_vlt->entries[0].v = 0;
3275 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
3276 table_clk_vlt->entries[1].v = 720;
3277 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
3278 table_clk_vlt->entries[2].v = 810;
3279 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
3280 table_clk_vlt->entries[3].v = 900;
3281 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
3282 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
3283 }
3284
3285 return 0;
3286}
3287
3288static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
3289{
3290 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3291 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3292
3293 phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
3294 pptable_info->vdd_dep_on_sclk;
3295 phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
3296 pptable_info->vdd_dep_on_mclk;
3297
3298 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
3299 "VDD dependency on SCLK table is missing. \
3300 This table is mandatory", return -1);
3301 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
3302 "VDD dependency on SCLK table has to have is missing. \
3303 This table is mandatory", return -1);
3304
3305 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
3306 "VDD dependency on MCLK table is missing. \
3307 This table is mandatory", return -1);
3308 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
3309 "VDD dependency on MCLK table has to have is missing. \
3310 This table is mandatory", return -1);
3311
3312 data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
3313 data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3314
3315 pptable_info->max_clock_voltage_on_ac.sclk =
3316 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
3317 pptable_info->max_clock_voltage_on_ac.mclk =
3318 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
3319 pptable_info->max_clock_voltage_on_ac.vddc =
3320 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3321 pptable_info->max_clock_voltage_on_ac.vddci =
3322 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
3323
3324 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
3325 pptable_info->max_clock_voltage_on_ac.sclk;
3326 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
3327 pptable_info->max_clock_voltage_on_ac.mclk;
3328 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
3329 pptable_info->max_clock_voltage_on_ac.vddc;
3330 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
3331 pptable_info->max_clock_voltage_on_ac.vddci;
3332
3333 return 0;
3334}
3335
3336int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3337{
3338 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3339 int result = 1;
3340
3341 PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr),
3342 "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
3343 return result);
3344
3345 if (0 == data->pcie_dpm_key_disabled) {
3346 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
3347 hwmgr->smumgr,
3348 PPSMC_MSG_PCIeDPM_UnForceLevel)),
3349 "unforce pcie level failed!",
3350 return -1);
3351 }
3352
3353 result = tonga_upload_dpm_level_enable_mask(hwmgr);
3354
3355 return result;
3356}
3357
/*
 * Return the index of the lowest set bit in level_mask.
 * The caller must pass a non-zero mask; hwmgr is unused.
 */
static uint32_t tonga_get_lowest_enable_level(
	struct pp_hwmgr *hwmgr, uint32_t level_mask)
{
	uint32_t bit;

	for (bit = 0; ((level_mask >> bit) & 1) == 0; bit++)
		;

	return bit;
}
3368
3369static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3370{
3371 uint32_t level;
3372 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3373
3374 if (0 == data->pcie_dpm_key_disabled) {
3375 /* PCIE */
3376 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
3377 level = tonga_get_lowest_enable_level(hwmgr,
3378 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3379 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
3380 "force lowest pcie dpm state failed!", return -1);
3381 }
3382 }
3383
3384 if (0 == data->sclk_dpm_key_disabled) {
3385 /* SCLK */
3386 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3387 level = tonga_get_lowest_enable_level(hwmgr,
3388 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3389
3390 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
3391 "force sclk dpm state failed!", return -1);
3392
3393 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
3394 CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
3395 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3396 Curr_Sclk_Index does not match the level \n");
3397 }
3398 }
3399
3400 if (0 == data->mclk_dpm_key_disabled) {
3401 /* MCLK */
3402 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
3403 level = tonga_get_lowest_enable_level(hwmgr,
3404 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3405 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
3406 "force lowest mclk dpm state failed!", return -1);
3407 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3408 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
3409 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3410 Curr_Mclk_Index does not match the level \n");
3411 }
3412 }
3413
3414 return 0;
3415}
3416
3417static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr)
3418{
3419 uint8_t entryId;
3420 uint8_t voltageId;
3421 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3422 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3423
3424 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
3425 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
3426 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
3427
3428 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3429 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
3430 voltageId = sclk_table->entries[entryId].vddInd;
3431 sclk_table->entries[entryId].vddgfx =
3432 pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd;
3433 }
3434 } else {
3435 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
3436 voltageId = sclk_table->entries[entryId].vddInd;
3437 sclk_table->entries[entryId].vddc =
3438 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3439 }
3440 }
3441
3442 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
3443 voltageId = mclk_table->entries[entryId].vddInd;
3444 mclk_table->entries[entryId].vddc =
3445 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3446 }
3447
3448 for (entryId = 0; entryId < mm_table->count; ++entryId) {
3449 voltageId = mm_table->entries[entryId].vddcInd;
3450 mm_table->entries[entryId].vddc =
3451 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3452 }
3453
3454 return 0;
3455
3456}
3457
3458static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
3459{
3460 uint8_t entryId;
3461 phm_ppt_v1_voltage_lookup_record v_record;
3462 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3463 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3464
3465 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
3466 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
3467
3468 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3469 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
3470 if (sclk_table->entries[entryId].vdd_offset & (1 << 15))
3471 v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
3472 sclk_table->entries[entryId].vdd_offset - 0xFFFF;
3473 else
3474 v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
3475 sclk_table->entries[entryId].vdd_offset;
3476
3477 sclk_table->entries[entryId].vddc =
3478 v_record.us_cac_low = v_record.us_cac_mid =
3479 v_record.us_cac_high = v_record.us_vdd;
3480
3481 tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
3482 }
3483
3484 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
3485 if (mclk_table->entries[entryId].vdd_offset & (1 << 15))
3486 v_record.us_vdd = mclk_table->entries[entryId].vddc +
3487 mclk_table->entries[entryId].vdd_offset - 0xFFFF;
3488 else
3489 v_record.us_vdd = mclk_table->entries[entryId].vddc +
3490 mclk_table->entries[entryId].vdd_offset;
3491
3492 mclk_table->entries[entryId].vddgfx = v_record.us_cac_low =
3493 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
3494 tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
3495 }
3496 }
3497
3498 return 0;
3499
3500}
3501
3502static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
3503{
3504 uint32_t entryId;
3505 phm_ppt_v1_voltage_lookup_record v_record;
3506 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3507 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3508 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
3509
3510 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3511 for (entryId = 0; entryId < mm_table->count; entryId++) {
3512 if (mm_table->entries[entryId].vddgfx_offset & (1 << 15))
3513 v_record.us_vdd = mm_table->entries[entryId].vddc +
3514 mm_table->entries[entryId].vddgfx_offset - 0xFFFF;
3515 else
3516 v_record.us_vdd = mm_table->entries[entryId].vddc +
3517 mm_table->entries[entryId].vddgfx_offset;
3518
3519 /* Add the calculated VDDGFX to the VDDGFX lookup table */
3520 mm_table->entries[entryId].vddgfx = v_record.us_cac_low =
3521 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
3522 tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
3523 }
3524 }
3525 return 0;
3526}
3527
3528
3529/**
3530 * Change virtual leakage voltage to actual value.
3531 *
3532 * @param hwmgr the address of the powerplay hardware manager.
3533 * @param pointer to changing voltage
3534 * @param pointer to leakage table
3535 */
3536static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
3537 uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable)
3538{
3539 uint32_t leakage_index;
3540
3541 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
3542 for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) {
3543 /* if this voltage matches a leakage voltage ID */
3544 /* patch with actual leakage voltage */
3545 if (pLeakageTable->leakage_id[leakage_index] == *voltage) {
3546 *voltage = pLeakageTable->actual_voltage[leakage_index];
3547 break;
3548 }
3549 }
3550
3551 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
3552 printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n");
3553}
3554
3555/**
3556 * Patch voltage lookup table by EVV leakages.
3557 *
3558 * @param hwmgr the address of the powerplay hardware manager.
3559 * @param pointer to voltage lookup table
3560 * @param pointer to leakage table
3561 * @return always 0
3562 */
3563static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
3564 phm_ppt_v1_voltage_lookup_table *lookup_table,
3565 phw_tonga_leakage_voltage *pLeakageTable)
3566{
3567 uint32_t i;
3568
3569 for (i = 0; i < lookup_table->count; i++) {
3570 tonga_patch_with_vdd_leakage(hwmgr,
3571 &lookup_table->entries[i].us_vdd, pLeakageTable);
3572 }
3573
3574 return 0;
3575}
3576
3577static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr,
3578 phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc)
3579{
3580 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3581
3582 tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable);
3583 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
3584 pptable_info->max_clock_voltage_on_dc.vddc;
3585
3586 return 0;
3587}
3588
3589static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage(
3590 struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable,
3591 uint16_t *Vddgfx)
3592{
3593 tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable);
3594 return 0;
3595}
3596
3597int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr,
3598 phm_ppt_v1_voltage_lookup_table *lookup_table)
3599{
3600 uint32_t table_size, i, j;
3601 phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
3602 table_size = lookup_table->count;
3603
3604 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
3605 "Lookup table is empty", return -1);
3606
3607 /* Sorting voltages */
3608 for (i = 0; i < table_size - 1; i++) {
3609 for (j = i + 1; j > 0; j--) {
3610 if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) {
3611 tmp_voltage_lookup_record = lookup_table->entries[j-1];
3612 lookup_table->entries[j-1] = lookup_table->entries[j];
3613 lookup_table->entries[j] = tmp_voltage_lookup_record;
3614 }
3615 }
3616 }
3617
3618 return 0;
3619}
3620
3621static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr)
3622{
3623 int result = 0;
3624 int tmp_result;
3625 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3626 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3627
3628 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3629 tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
3630 pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
3631 if (tmp_result != 0)
3632 result = tmp_result;
3633
3634 tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr,
3635 &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx);
3636 if (tmp_result != 0)
3637 result = tmp_result;
3638 } else {
3639 tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
3640 pptable_info->vddc_lookup_table, &(data->vddc_leakage));
3641 if (tmp_result != 0)
3642 result = tmp_result;
3643
3644 tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr,
3645 &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc);
3646 if (tmp_result != 0)
3647 result = tmp_result;
3648 }
3649
3650 tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
3651 if (tmp_result != 0)
3652 result = tmp_result;
3653
3654 tmp_result = tonga_calc_voltage_dependency_tables(hwmgr);
3655 if (tmp_result != 0)
3656 result = tmp_result;
3657
3658 tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr);
3659 if (tmp_result != 0)
3660 result = tmp_result;
3661
3662 tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table);
3663 if (tmp_result != 0)
3664 result = tmp_result;
3665
3666 tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table);
3667 if (tmp_result != 0)
3668 result = tmp_result;
3669
3670 return result;
3671}
3672
3673int tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr)
3674{
3675 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3676 data->low_sclk_interrupt_threshold = 0;
3677
3678 return 0;
3679}
3680
/**
 * One-time ASIC setup: snapshot clock registers, detect memory type, enable
 * ACPI power management, initialize power-gate state, read the MC microcode
 * version, and reset the SCLK threshold. All steps run even if one fails;
 * the last failing status is returned.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, otherwise the status of the last failing step.
 */
int tonga_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = tonga_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to read clock registers!", result = tmp_result);

	tmp_result = tonga_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to get memory type!", result = tmp_result);

	tmp_result = tonga_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = tonga_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to init power gate state!", result = tmp_result);

	tmp_result = tonga_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = tonga_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
3711
/**
 * Enable voltage control.
 *
 * Sets the VOLT_PWRMGT_EN field of GENERAL_PWRMGT via the SMC indirect
 * register interface.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
3725
3726/**
3727 * Checks if we want to support voltage control
3728 *
3729 * @param hwmgr the address of the powerplay hardware manager.
3730 */
3731bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr)
3732{
3733 const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3734
3735 return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control);
3736}
3737
3738/*---------------------------MC----------------------------*/
3739
3740uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
3741{
3742 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
3743}
3744
3745bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
3746{
3747 bool result = 1;
3748
3749 switch (inReg) {
3750 case mmMC_SEQ_RAS_TIMING:
3751 *outReg = mmMC_SEQ_RAS_TIMING_LP;
3752 break;
3753
3754 case mmMC_SEQ_DLL_STBY:
3755 *outReg = mmMC_SEQ_DLL_STBY_LP;
3756 break;
3757
3758 case mmMC_SEQ_G5PDX_CMD0:
3759 *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
3760 break;
3761
3762 case mmMC_SEQ_G5PDX_CMD1:
3763 *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
3764 break;
3765
3766 case mmMC_SEQ_G5PDX_CTRL:
3767 *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
3768 break;
3769
3770 case mmMC_SEQ_CAS_TIMING:
3771 *outReg = mmMC_SEQ_CAS_TIMING_LP;
3772 break;
3773
3774 case mmMC_SEQ_MISC_TIMING:
3775 *outReg = mmMC_SEQ_MISC_TIMING_LP;
3776 break;
3777
3778 case mmMC_SEQ_MISC_TIMING2:
3779 *outReg = mmMC_SEQ_MISC_TIMING2_LP;
3780 break;
3781
3782 case mmMC_SEQ_PMG_DVS_CMD:
3783 *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
3784 break;
3785
3786 case mmMC_SEQ_PMG_DVS_CTL:
3787 *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
3788 break;
3789
3790 case mmMC_SEQ_RD_CTL_D0:
3791 *outReg = mmMC_SEQ_RD_CTL_D0_LP;
3792 break;
3793
3794 case mmMC_SEQ_RD_CTL_D1:
3795 *outReg = mmMC_SEQ_RD_CTL_D1_LP;
3796 break;
3797
3798 case mmMC_SEQ_WR_CTL_D0:
3799 *outReg = mmMC_SEQ_WR_CTL_D0_LP;
3800 break;
3801
3802 case mmMC_SEQ_WR_CTL_D1:
3803 *outReg = mmMC_SEQ_WR_CTL_D1_LP;
3804 break;
3805
3806 case mmMC_PMG_CMD_EMRS:
3807 *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
3808 break;
3809
3810 case mmMC_PMG_CMD_MRS:
3811 *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
3812 break;
3813
3814 case mmMC_PMG_CMD_MRS1:
3815 *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
3816 break;
3817
3818 case mmMC_SEQ_PMG_TIMING:
3819 *outReg = mmMC_SEQ_PMG_TIMING_LP;
3820 break;
3821
3822 case mmMC_PMG_CMD_MRS2:
3823 *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
3824 break;
3825
3826 case mmMC_SEQ_WR_CTL_2:
3827 *outReg = mmMC_SEQ_WR_CTL_2_LP;
3828 break;
3829
3830 default:
3831 result = 0;
3832 break;
3833 }
3834
3835 return result;
3836}
3837
3838int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table)
3839{
3840 uint32_t i;
3841 uint16_t address;
3842
3843 for (i = 0; i < table->last; i++) {
3844 table->mc_reg_address[i].s0 =
3845 tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
3846 ? address : table->mc_reg_address[i].s1;
3847 }
3848 return 0;
3849}
3850
3851int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table)
3852{
3853 uint8_t i, j;
3854
3855 PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3856 "Invalid VramInfo table.", return -1);
3857 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
3858 "Invalid VramInfo table.", return -1);
3859
3860 for (i = 0; i < table->last; i++) {
3861 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3862 }
3863 ni_table->last = table->last;
3864
3865 for (i = 0; i < table->num_entries; i++) {
3866 ni_table->mc_reg_table_entry[i].mclk_max =
3867 table->mc_reg_table_entry[i].mclk_max;
3868 for (j = 0; j < table->last; j++) {
3869 ni_table->mc_reg_table_entry[i].mc_data[j] =
3870 table->mc_reg_table_entry[i].mc_data[j];
3871 }
3872 }
3873
3874 ni_table->num_entries = table->num_entries;
3875
3876 return 0;
3877}
3878
3879/**
3880 * VBIOS omits some information to reduce size, we need to recover them here.
3881 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
3882 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
3883 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
3884 * 3. need to set these data for each clock range
3885 *
3886 * @param hwmgr the address of the powerplay hardware manager.
3887 * @param table the address of MCRegTable
3888 * @return always 0
3889 */
3890int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table)
3891{
3892 uint8_t i, j, k;
3893 uint32_t temp_reg;
3894 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3895
3896 for (i = 0, j = table->last; i < table->last; i++) {
3897 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3898 "Invalid VramInfo table.", return -1);
3899 switch (table->mc_reg_address[i].s1) {
3900 /*
3901 * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
3902 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
3903 */
3904 case mmMC_SEQ_MISC1:
3905 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
3906 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
3907 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
3908 for (k = 0; k < table->num_entries; k++) {
3909 table->mc_reg_table_entry[k].mc_data[j] =
3910 ((temp_reg & 0xffff0000)) |
3911 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3912 }
3913 j++;
3914 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3915 "Invalid VramInfo table.", return -1);
3916
3917 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
3918 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
3919 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
3920 for (k = 0; k < table->num_entries; k++) {
3921 table->mc_reg_table_entry[k].mc_data[j] =
3922 (temp_reg & 0xffff0000) |
3923 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3924
3925 if (!data->is_memory_GDDR5) {
3926 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3927 }
3928 }
3929 j++;
3930 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3931 "Invalid VramInfo table.", return -1);
3932
3933 if (!data->is_memory_GDDR5) {
3934 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
3935 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
3936 for (k = 0; k < table->num_entries; k++) {
3937 table->mc_reg_table_entry[k].mc_data[j] =
3938 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3939 }
3940 j++;
3941 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3942 "Invalid VramInfo table.", return -1);
3943 }
3944
3945 break;
3946
3947 case mmMC_SEQ_RESERVE_M:
3948 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
3949 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
3950 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
3951 for (k = 0; k < table->num_entries; k++) {
3952 table->mc_reg_table_entry[k].mc_data[j] =
3953 (temp_reg & 0xffff0000) |
3954 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3955 }
3956 j++;
3957 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3958 "Invalid VramInfo table.", return -1);
3959 break;
3960
3961 default:
3962 break;
3963 }
3964
3965 }
3966
3967 table->last = j;
3968
3969 return 0;
3970}
3971
3972int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
3973{
3974 uint8_t i, j;
3975 for (i = 0; i < table->last; i++) {
3976 for (j = 1; j < table->num_entries; j++) {
3977 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3978 table->mc_reg_table_entry[j].mc_data[i]) {
3979 table->validflag |= (1<<i);
3980 break;
3981 }
3982 }
3983 }
3984
3985 return 0;
3986}
3987
3988int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
3989{
3990 int result;
3991 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3992 pp_atomctrl_mc_reg_table *table;
3993 phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
3994 uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
3995
3996 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
3997
3998 if (NULL == table)
3999 return -ENOMEM;
4000
4001 /* Program additional LP registers that are no longer programmed by VBIOS */
4002 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
4003 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
4004 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
4005 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
4006 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
4007 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
4008 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
4009 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
4010 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
4011 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
4012 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
4013 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
4014 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
4015 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
4016 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
4017 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
4018 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
4019 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
4020 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
4021 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
4022
4023 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
4024
4025 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
4026
4027 if (0 == result)
4028 result = tonga_copy_vbios_smc_reg_table(table, ni_table);
4029
4030 if (0 == result) {
4031 tonga_set_s0_mc_reg_index(ni_table);
4032 result = tonga_set_mc_special_registers(hwmgr, ni_table);
4033 }
4034
4035 if (0 == result)
4036 tonga_set_valid_flag(ni_table);
4037
4038 kfree(table);
4039 return result;
4040}
4041
4042/*
4043* Copy one arb setting to another and then switch the active set.
4044* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants.
4045*/
4046int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
4047 uint32_t arbFreqSrc, uint32_t arbFreqDest)
4048{
4049 uint32_t mc_arb_dram_timing;
4050 uint32_t mc_arb_dram_timing2;
4051 uint32_t burst_time;
4052 uint32_t mc_cg_config;
4053
4054 switch (arbFreqSrc) {
4055 case MC_CG_ARB_FREQ_F0:
4056 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
4057 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
4058 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
4059 break;
4060
4061 case MC_CG_ARB_FREQ_F1:
4062 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
4063 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
4064 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
4065 break;
4066
4067 default:
4068 return -1;
4069 }
4070
4071 switch (arbFreqDest) {
4072 case MC_CG_ARB_FREQ_F0:
4073 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
4074 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
4075 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
4076 break;
4077
4078 case MC_CG_ARB_FREQ_F1:
4079 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
4080 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
4081 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
4082 break;
4083
4084 default:
4085 return -1;
4086 }
4087
4088 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
4089 mc_cg_config |= 0x0000000F;
4090 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
4091 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
4092
4093 return 0;
4094}
4095
/**
 * Initial switch from ARB F0->F1.
 *
 * Copies the F0 DRAM timing set into F1 and makes F1 the active set.
 * This function is to be called from the SetPowerState table.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return result of tonga_copy_and_switch_arb_sets (0 on success).
 */
int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
{
	return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
4107
4108/**
4109 * Initialize the ARB DRAM timing table's index field.
4110 *
4111 * @param hwmgr the address of the powerplay hardware manager.
4112 * @return always 0
4113 */
4114int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
4115{
4116 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4117 uint32_t tmp;
4118 int result;
4119
4120 /*
4121 * This is a read-modify-write on the first byte of the ARB table.
4122 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'.
4123 * This solution is ugly, but we never write the whole table only individual fields in it.
4124 * In reality this field should not be in that structure but in a soft register.
4125 */
4126 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
4127 data->arb_table_start, &tmp, data->sram_end);
4128
4129 if (0 != result)
4130 return result;
4131
4132 tmp &= 0x00FFFFFF;
4133 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
4134
4135 return tonga_write_smc_sram_dword(hwmgr->smumgr,
4136 data->arb_table_start, tmp, data->sram_end);
4137}
4138
4139int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table)
4140{
4141 const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4142
4143 uint32_t i, j;
4144
4145 for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) {
4146 if (data->tonga_mc_reg_table.validflag & 1<<j) {
4147 PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
4148 "Index of mc_reg_table->address[] array out of boundary", return -1);
4149 mc_reg_table->address[i].s0 =
4150 PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0);
4151 mc_reg_table->address[i].s1 =
4152 PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1);
4153 i++;
4154 }
4155 }
4156
4157 mc_reg_table->last = (uint8_t)i;
4158
4159 return 0;
4160}
4161
4162/*convert register values from driver to SMC format */
4163void tonga_convert_mc_registers(
4164 const phw_tonga_mc_reg_entry * pEntry,
4165 SMU72_Discrete_MCRegisterSet *pData,
4166 uint32_t numEntries, uint32_t validflag)
4167{
4168 uint32_t i, j;
4169
4170 for (i = 0, j = 0; j < numEntries; j++) {
4171 if (validflag & 1<<j) {
4172 pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
4173 i++;
4174 }
4175 }
4176}
4177
4178/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */
4179int tonga_convert_mc_reg_table_entry_to_smc(
4180 struct pp_hwmgr *hwmgr,
4181 const uint32_t memory_clock,
4182 SMU72_Discrete_MCRegisterSet *mc_reg_table_data
4183 )
4184{
4185 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4186 uint32_t i = 0;
4187
4188 for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) {
4189 if (memory_clock <=
4190 data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
4191 break;
4192 }
4193 }
4194
4195 if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0))
4196 --i;
4197
4198 tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i],
4199 mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag);
4200
4201 return 0;
4202}
4203
4204int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
4205 SMU72_Discrete_MCRegisters *mc_reg_table)
4206{
4207 int result = 0;
4208 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4209 int res;
4210 uint32_t i;
4211
4212 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
4213 res = tonga_convert_mc_reg_table_entry_to_smc(
4214 hwmgr,
4215 data->dpm_table.mclk_table.dpm_levels[i].value,
4216 &mc_reg_table->data[i]
4217 );
4218
4219 if (0 != res)
4220 result = res;
4221 }
4222
4223 return result;
4224}
4225
/*
 * Build the SMC MC register table (addresses, then per-level data) and
 * upload it into SMC SRAM at mc_reg_table_start.
 *
 * Returns 0 on success, otherwise the first failing step's status.
 */
int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters));
	/* Compacted register addresses first ... */
	result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for the MC register addresses!", return result;);

	/* ... then one register set per MCLK DPM level. */
	result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for driver state!", return result;);

	/* Upload the populated table into SMC SRAM. */
	return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
		(uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end);
}
4243
/**
 * Programs static screen detection parameters.
 *
 * Writes the threshold unit and threshold value from the backend into
 * CG_STATIC_SCREEN_PARAMETER via the SMC indirect register interface.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit*/
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
		data->static_screen_threshold_unit);
	/* Set static screen threshold*/
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
		data->static_screen_threshold);

	return 0;
}
4265
4266/**
4267 * Setup display gap for glitch free memory clock switching.
4268 *
4269 * @param hwmgr the address of the powerplay hardware manager.
4270 * @return always 0
4271 */
4272int tonga_enable_display_gap(struct pp_hwmgr *hwmgr)
4273{
4274 uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
4275 CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4276
4277 display_gap = PHM_SET_FIELD(display_gap,
4278 CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
4279
4280 display_gap = PHM_SET_FIELD(display_gap,
4281 CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
4282
4283 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4284 ixCG_DISPLAY_GAP_CNTL, display_gap);
4285
4286 return 0;
4287}
4288
/**
 * Programs activity state transition voting clients.
 *
 * Clears the SCLK/busy counter resets, then writes the eight
 * CG_FREQ_TRAN_VOTING registers from the backend's voting-rights masks.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
		SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
		SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	/* One voting-rights mask per client register; distinct fields, so no loop. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
		ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

	return 0;
}
4324
4325
4326int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
4327{
4328 int tmp_result, result = 0;
4329
4330 tmp_result = tonga_check_for_dpm_stopped(hwmgr);
4331
4332 if (cf_tonga_voltage_control(hwmgr)) {
4333 tmp_result = tonga_enable_voltage_control(hwmgr);
4334 PP_ASSERT_WITH_CODE((0 == tmp_result),
4335 "Failed to enable voltage control!", result = tmp_result);
4336
4337 tmp_result = tonga_construct_voltage_tables(hwmgr);
4338 PP_ASSERT_WITH_CODE((0 == tmp_result),
4339 "Failed to contruct voltage tables!", result = tmp_result);
4340 }
4341
4342 tmp_result = tonga_initialize_mc_reg_table(hwmgr);
4343 PP_ASSERT_WITH_CODE((0 == tmp_result),
4344 "Failed to initialize MC reg table!", result = tmp_result);
4345
4346 tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr);
4347 PP_ASSERT_WITH_CODE((0 == tmp_result),
4348 "Failed to program static screen threshold parameters!", result = tmp_result);
4349
4350 tmp_result = tonga_enable_display_gap(hwmgr);
4351 PP_ASSERT_WITH_CODE((0 == tmp_result),
4352 "Failed to enable display gap!", result = tmp_result);
4353
4354 tmp_result = tonga_program_voting_clients(hwmgr);
4355 PP_ASSERT_WITH_CODE((0 == tmp_result),
4356 "Failed to program voting clients!", result = tmp_result);
4357
4358 tmp_result = tonga_process_firmware_header(hwmgr);
4359 PP_ASSERT_WITH_CODE((0 == tmp_result),
4360 "Failed to process firmware header!", result = tmp_result);
4361
4362 tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr);
4363 PP_ASSERT_WITH_CODE((0 == tmp_result),
4364 "Failed to initialize switch from ArbF0 to F1!", result = tmp_result);
4365
4366 tmp_result = tonga_init_smc_table(hwmgr);
4367 PP_ASSERT_WITH_CODE((0 == tmp_result),
4368 "Failed to initialize SMC table!", result = tmp_result);
4369
4370 tmp_result = tonga_init_arb_table_index(hwmgr);
4371 PP_ASSERT_WITH_CODE((0 == tmp_result),
4372 "Failed to initialize ARB table index!", result = tmp_result);
4373
4374 tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
4375 PP_ASSERT_WITH_CODE((0 == tmp_result),
4376 "Failed to populate initialize MC Reg table!", result = tmp_result);
4377
4378 tmp_result = tonga_notify_smc_display_change(hwmgr, false);
4379 PP_ASSERT_WITH_CODE((0 == tmp_result),
4380 "Failed to notify no display!", result = tmp_result);
4381
4382 /* enable SCLK control */
4383 tmp_result = tonga_enable_sclk_control(hwmgr);
4384 PP_ASSERT_WITH_CODE((0 == tmp_result),
4385 "Failed to enable SCLK control!", result = tmp_result);
4386
4387 /* enable DPM */
4388 tmp_result = tonga_start_dpm(hwmgr);
4389 PP_ASSERT_WITH_CODE((0 == tmp_result),
4390 "Failed to start DPM!", result = tmp_result);
4391
4392 return result;
4393}
4394
/**
 * Run the DPM disable sequence.
 *
 * If the SMC reports that DPM is still running, the function bails out
 * early and returns 0 rather than tearing down an active DPM state.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success/early-out, otherwise the last failing step's code.
 */
int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Early-out (returning 0) while the SMC is still running. */
	tmp_result = tonga_check_for_dpm_running(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"SMC is still running!", return 0);

	tmp_result = tonga_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to stop DPM!", result = tmp_result);

	/* Restore default settings after DPM has been stopped. */
	tmp_result = tonga_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to reset to default!", result = tmp_result);

	return result;
}
4413
4414int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
4415{
4416 int result;
4417
4418 result = tonga_set_boot_state(hwmgr);
4419 if (0 != result)
4420 printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n");
4421
4422 return result;
4423}
4424
4425int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
4426{
4427 if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
4428 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
4429 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
4430 }
4431
4432 if (NULL != hwmgr->backend) {
4433 kfree(hwmgr->backend);
4434 hwmgr->backend = NULL;
4435 }
4436
4437 return 0;
4438}
4439
4440/**
4441 * Initializes the Volcanic Islands Hardware Manager
4442 *
4443 * @param hwmgr the address of the powerplay hardware manager.
4444 * @return 1 if success; otherwise appropriate error code.
4445 */
4446int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4447{
4448 int result = 0;
4449 SMU72_Discrete_DpmTable *table = NULL;
4450 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4451 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
4452 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4453 phw_tonga_ulv_parm *ulv;
4454
4455 PP_ASSERT_WITH_CODE((NULL != hwmgr),
4456 "Invalid Parameter!", return -1;);
4457
4458 data->dll_defaule_on = 0;
4459 data->sram_end = SMC_RAM_END;
4460
4461 data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
4462 data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT;
4463 data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT;
4464 data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT;
4465 data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT;
4466 data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT;
4467 data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT;
4468 data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT;
4469
4470 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
4471 data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA;
4472 data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT;
4473
4474 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4475 PHM_PlatformCaps_DisableVoltageIsland);
4476
4477 data->sclk_dpm_key_disabled = 0;
4478 data->mclk_dpm_key_disabled = 0;
4479 data->pcie_dpm_key_disabled = 0;
4480 data->pcc_monitor_enabled = 0;
4481
4482 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4483 PHM_PlatformCaps_UnTabledHardwareInterface);
4484
4485 data->gpio_debug = 0;
4486 data->engine_clock_data = 0;
4487 data->memory_clock_data = 0;
4488 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4489 PHM_PlatformCaps_DynamicPatchPowerState);
4490
4491 /* need to set voltage control types before EVV patching*/
4492 data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE;
4493 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
4494 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
4495 data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
4496
4497 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4498 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
4499 data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4500 }
4501
4502 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4503 PHM_PlatformCaps_ControlVDDGFX)) {
4504 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4505 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
4506 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4507 }
4508 }
4509
4510 if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
4511 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4512 PHM_PlatformCaps_ControlVDDGFX);
4513 }
4514
4515 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4516 PHM_PlatformCaps_EnableMVDDControl)) {
4517 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4518 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) {
4519 data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
4520 }
4521 }
4522
4523 if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) {
4524 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4525 PHM_PlatformCaps_EnableMVDDControl);
4526 }
4527
4528 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4529 PHM_PlatformCaps_ControlVDDCI)) {
4530 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4531 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
4532 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
4533 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4534 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
4535 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4536 }
4537
4538 if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
4539 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4540 PHM_PlatformCaps_ControlVDDCI);
4541
4542 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4543 PHM_PlatformCaps_TablelessHardwareInterface);
4544
4545 if (pptable_info->cac_dtp_table->usClockStretchAmount != 0)
4546 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4547 PHM_PlatformCaps_ClockStretcher);
4548
4549 /* Initializes DPM default values*/
4550 tonga_initialize_dpm_defaults(hwmgr);
4551
4552 /* Get leakage voltage based on leakage ID.*/
4553 PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)),
4554 "Get EVV Voltage Failed. Abort Driver loading!", return -1);
4555
4556 tonga_complete_dependency_tables(hwmgr);
4557
4558 /* Parse pptable data read from VBIOS*/
4559 tonga_set_private_var_based_on_pptale(hwmgr);
4560
4561 /* ULV Support*/
4562 ulv = &(data->ulv);
4563 ulv->ulv_supported = 0;
4564
4565 /* Initalize Dynamic State Adjustment Rule Settings*/
4566 result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
4567 if (result)
4568 printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
4569 data->uvd_enabled = 0;
4570
4571 table = &(data->smc_state_table);
4572
4573 /*
4574 * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
4575 * Peak Current Control feature is enabled and we should program PCC HW register
4576 */
4577 if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
4578 uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
4579 CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
4580
4581 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
4582 case 0:
4583 temp_reg = PHM_SET_FIELD(temp_reg,
4584 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
4585 break;
4586 case 1:
4587 temp_reg = PHM_SET_FIELD(temp_reg,
4588 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
4589 break;
4590 case 2:
4591 temp_reg = PHM_SET_FIELD(temp_reg,
4592 CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
4593 break;
4594 case 3:
4595 temp_reg = PHM_SET_FIELD(temp_reg,
4596 CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
4597 break;
4598 case 4:
4599 temp_reg = PHM_SET_FIELD(temp_reg,
4600 CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
4601 break;
4602 default:
4603 printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \
4604 Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! \n");
4605 break;
4606 }
4607 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4608 ixCNB_PWRMGT_CNTL, temp_reg);
4609 }
4610
4611 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4612 PHM_PlatformCaps_EnableSMU7ThermalManagement);
4613 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4614 PHM_PlatformCaps_SMU7);
4615
4616 data->vddc_phase_shed_control = 0;
4617
4618 if (0 == result) {
4619 struct cgs_system_info sys_info = {0};
4620
4621 data->is_tlu_enabled = 0;
4622 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
4623 TONGA_MAX_HARDWARE_POWERLEVELS;
4624 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
4625 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
4626
4627 sys_info.size = sizeof(struct cgs_system_info);
4628 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
4629 result = cgs_query_system_info(hwmgr->device, &sys_info);
4630 if (result)
4631 data->pcie_gen_cap = 0x30007;
4632 else
4633 data->pcie_gen_cap = (uint32_t)sys_info.value;
4634 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
4635 data->pcie_spc_cap = 20;
4636 sys_info.size = sizeof(struct cgs_system_info);
4637 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
4638 result = cgs_query_system_info(hwmgr->device, &sys_info);
4639 if (result)
4640 data->pcie_lane_cap = 0x2f0000;
4641 else
4642 data->pcie_lane_cap = (uint32_t)sys_info.value;
4643 } else {
4644 /* Ignore return value in here, we are cleaning up a mess. */
4645 tonga_hwmgr_backend_fini(hwmgr);
4646 }
4647
4648 return result;
4649}
4650
4651static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr,
4652 enum amd_dpm_forced_level level)
4653{
4654 int ret = 0;
4655
4656 switch (level) {
4657 case AMD_DPM_FORCED_LEVEL_HIGH:
4658 ret = tonga_force_dpm_highest(hwmgr);
4659 if (ret)
4660 return ret;
4661 break;
4662 case AMD_DPM_FORCED_LEVEL_LOW:
4663 ret = tonga_force_dpm_lowest(hwmgr);
4664 if (ret)
4665 return ret;
4666 break;
4667 case AMD_DPM_FORCED_LEVEL_AUTO:
4668 ret = tonga_unforce_dpm_levels(hwmgr);
4669 if (ret)
4670 return ret;
4671 break;
4672 default:
4673 break;
4674 }
4675
4676 hwmgr->dpm_level = level;
4677 return ret;
4678}
4679
4680static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
4681 struct pp_power_state *prequest_ps,
4682 const struct pp_power_state *pcurrent_ps)
4683{
4684 struct tonga_power_state *tonga_ps =
4685 cast_phw_tonga_power_state(&prequest_ps->hardware);
4686
4687 uint32_t sclk;
4688 uint32_t mclk;
4689 struct PP_Clocks minimum_clocks = {0};
4690 bool disable_mclk_switching;
4691 bool disable_mclk_switching_for_frame_lock;
4692 struct cgs_display_info info = {0};
4693 const struct phm_clock_and_voltage_limits *max_limits;
4694 uint32_t i;
4695 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4696 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4697
4698 int32_t count;
4699 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
4700
4701 data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
4702
4703 PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2,
4704 "VI should always have 2 performance levels",
4705 );
4706
4707 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
4708 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
4709 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
4710
4711 if (PP_PowerSource_DC == hwmgr->power_source) {
4712 for (i = 0; i < tonga_ps->performance_level_count; i++) {
4713 if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk)
4714 tonga_ps->performance_levels[i].memory_clock = max_limits->mclk;
4715 if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk)
4716 tonga_ps->performance_levels[i].engine_clock = max_limits->sclk;
4717 }
4718 }
4719
4720 tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
4721 tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;
4722
4723 tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
4724
4725 cgs_get_active_displays_info(hwmgr->device, &info);
4726
4727 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
4728
4729 /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
4730
4731 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
4732
4733 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
4734 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
4735
4736 for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
4737 if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) {
4738 stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk;
4739 break;
4740 }
4741 }
4742
4743 if (count < 0)
4744 stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk;
4745
4746 stable_pstate_mclk = max_limits->mclk;
4747
4748 minimum_clocks.engineClock = stable_pstate_sclk;
4749 minimum_clocks.memoryClock = stable_pstate_mclk;
4750 }
4751
4752 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
4753 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
4754
4755 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
4756 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
4757
4758 tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
4759
4760 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
4761 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
4762 "Overdrive sclk exceeds limit",
4763 hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
4764
4765 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
4766 tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
4767 }
4768
4769 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
4770 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
4771 "Overdrive mclk exceeds limit",
4772 hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
4773
4774 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
4775 tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
4776 }
4777
4778 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
4779 hwmgr->platform_descriptor.platformCaps,
4780 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
4781
4782 disable_mclk_switching = (1 < info.display_count) ||
4783 disable_mclk_switching_for_frame_lock;
4784
4785 sclk = tonga_ps->performance_levels[0].engine_clock;
4786 mclk = tonga_ps->performance_levels[0].memory_clock;
4787
4788 if (disable_mclk_switching)
4789 mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock;
4790
4791 if (sclk < minimum_clocks.engineClock)
4792 sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
4793
4794 if (mclk < minimum_clocks.memoryClock)
4795 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
4796
4797 tonga_ps->performance_levels[0].engine_clock = sclk;
4798 tonga_ps->performance_levels[0].memory_clock = mclk;
4799
4800 tonga_ps->performance_levels[1].engine_clock =
4801 (tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ?
4802 tonga_ps->performance_levels[1].engine_clock :
4803 tonga_ps->performance_levels[0].engine_clock;
4804
4805 if (disable_mclk_switching) {
4806 if (mclk < tonga_ps->performance_levels[1].memory_clock)
4807 mclk = tonga_ps->performance_levels[1].memory_clock;
4808
4809 tonga_ps->performance_levels[0].memory_clock = mclk;
4810 tonga_ps->performance_levels[1].memory_clock = mclk;
4811 } else {
4812 if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock)
4813 tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock;
4814 }
4815
4816 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
4817 for (i=0; i < tonga_ps->performance_level_count; i++) {
4818 tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
4819 tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
4820 tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
4821 tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
4822 }
4823 }
4824
4825 return 0;
4826}
4827
4828int tonga_get_power_state_size(struct pp_hwmgr *hwmgr)
4829{
4830 return sizeof(struct tonga_power_state);
4831}
4832
4833static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
4834{
4835 struct pp_power_state *ps;
4836 struct tonga_power_state *tonga_ps;
4837
4838 if (hwmgr == NULL)
4839 return -EINVAL;
4840
4841 ps = hwmgr->request_ps;
4842
4843 if (ps == NULL)
4844 return -EINVAL;
4845
4846 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
4847
4848 if (low)
4849 return tonga_ps->performance_levels[0].memory_clock;
4850 else
4851 return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
4852}
4853
4854static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
4855{
4856 struct pp_power_state *ps;
4857 struct tonga_power_state *tonga_ps;
4858
4859 if (hwmgr == NULL)
4860 return -EINVAL;
4861
4862 ps = hwmgr->request_ps;
4863
4864 if (ps == NULL)
4865 return -EINVAL;
4866
4867 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
4868
4869 if (low)
4870 return tonga_ps->performance_levels[0].engine_clock;
4871 else
4872 return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
4873}
4874
4875static uint16_t tonga_get_current_pcie_speed(
4876 struct pp_hwmgr *hwmgr)
4877{
4878 uint32_t speed_cntl = 0;
4879
4880 speed_cntl = cgs_read_ind_register(hwmgr->device,
4881 CGS_IND_REG__PCIE,
4882 ixPCIE_LC_SPEED_CNTL);
4883 return((uint16_t)PHM_GET_FIELD(speed_cntl,
4884 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
4885}
4886
4887static int tonga_get_current_pcie_lane_number(
4888 struct pp_hwmgr *hwmgr)
4889{
4890 uint32_t link_width;
4891
4892 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
4893 CGS_IND_REG__PCIE,
4894 PCIE_LC_LINK_WIDTH_CNTL,
4895 LC_LINK_WIDTH_RD);
4896
4897 PP_ASSERT_WITH_CODE((7 >= link_width),
4898 "Invalid PCIe lane width!", return 0);
4899
4900 return decode_pcie_lane_width(link_width);
4901}
4902
/* Patch the boot power state with the real VBIOS boot clocks/voltages
 * read from the ATOM FirmwareInfo table, plus the currently-negotiated
 * PCIe gen/lane settings. Returns 0 even when the table is absent. */
static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
		 struct pp_hw_power_state *hw_ps)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. */
	/* Clocks/voltages are little-endian in the ATOM table. */
	data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe boot settings come from live registers, not the table. */
	data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr);
	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)tonga_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
4941
4942static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
4943 void *state, struct pp_power_state *power_state,
4944 void *pp_table, uint32_t classification_flag)
4945{
4946 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4947
4948 struct tonga_power_state *tonga_ps =
4949 (struct tonga_power_state *)(&(power_state->hardware));
4950
4951 struct tonga_performance_level *performance_level;
4952
4953 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
4954
4955 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
4956 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
4957
4958 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
4959 (ATOM_Tonga_SCLK_Dependency_Table *)
4960 (((unsigned long)powerplay_table) +
4961 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
4962
4963 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
4964 (ATOM_Tonga_MCLK_Dependency_Table *)
4965 (((unsigned long)powerplay_table) +
4966 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
4967
4968 /* The following fields are not initialized here: id orderedList allStatesList */
4969 power_state->classification.ui_label =
4970 (le16_to_cpu(state_entry->usClassification) &
4971 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
4972 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
4973 power_state->classification.flags = classification_flag;
4974 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
4975
4976 power_state->classification.temporary_state = false;
4977 power_state->classification.to_be_deleted = false;
4978
4979 power_state->validation.disallowOnDC =
4980 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC));
4981
4982 power_state->pcie.lanes = 0;
4983
4984 power_state->display.disableFrameModulation = false;
4985 power_state->display.limitRefreshrate = false;
4986 power_state->display.enableVariBright =
4987 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT));
4988
4989 power_state->validation.supportedPowerLevels = 0;
4990 power_state->uvd_clocks.VCLK = 0;
4991 power_state->uvd_clocks.DCLK = 0;
4992 power_state->temperatures.min = 0;
4993 power_state->temperatures.max = 0;
4994
4995 performance_level = &(tonga_ps->performance_levels
4996 [tonga_ps->performance_level_count++]);
4997
4998 PP_ASSERT_WITH_CODE(
4999 (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS),
5000 "Performance levels exceeds SMC limit!",
5001 return -1);
5002
5003 PP_ASSERT_WITH_CODE(
5004 (tonga_ps->performance_level_count <=
5005 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
5006 "Performance levels exceeds Driver limit!",
5007 return -1);
5008
5009 /* Performance levels are arranged from low to high. */
5010 performance_level->memory_clock =
5011 le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk);
5012
5013 performance_level->engine_clock =
5014 le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk);
5015
5016 performance_level->pcie_gen = get_pcie_gen_support(
5017 data->pcie_gen_cap,
5018 state_entry->ucPCIEGenLow);
5019
5020 performance_level->pcie_lane = get_pcie_lane_support(
5021 data->pcie_lane_cap,
5022 state_entry->ucPCIELaneHigh);
5023
5024 performance_level =
5025 &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]);
5026
5027 performance_level->memory_clock =
5028 le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk);
5029
5030 performance_level->engine_clock =
5031 le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk);
5032
5033 performance_level->pcie_gen = get_pcie_gen_support(
5034 data->pcie_gen_cap,
5035 state_entry->ucPCIEGenHigh);
5036
5037 performance_level->pcie_lane = get_pcie_lane_support(
5038 data->pcie_lane_cap,
5039 state_entry->ucPCIELaneHigh);
5040
5041 return 0;
5042}
5043
/* Fetch one power-state entry from the powerplay table, sanity-check it
 * against the VBIOS boot state, and update per-class PCIe min/max
 * bookkeeping in the backend. Always returns 0 to the caller. */
static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *ps)
{
	int result;
	struct tonga_power_state *tonga_ps;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
					table_info->vdd_dep_on_mclk;

	ps->hardware.magic = PhwTonga_Magic;

	tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));

	/* Parses the entry; the callback fills in the performance levels. */
	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
			tonga_get_pp_table_entry_callback_func);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!ps->validation.disallowOnDC)
		tonga_ps->dc_compatible = true;

	if (ps->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen;
	else if (ps->classification.flags & PP_StateClassificationFlag_Boot) {
		if (data->bacos.best_match == 0xffff) {
			/* For V.I. use boot state as base BACO state */
			data->bacos.best_match = PP_StateClassificationFlag_Boot;
			data->bacos.performance_level = tonga_ps->performance_levels[0];
		}
	}

	tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK;
	tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK;

	/* Widen the backend's per-class PCIe gen/lane min-max ranges to
	 * cover every level of this state. */
	if (!result) {
		uint32_t i;

		switch (ps->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < tonga_ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							tonga_ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							tonga_ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < tonga_ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							tonga_ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							tonga_ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
5156
5157static void
5158tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
5159{
5160 uint32_t sclk, mclk, activity_percent;
5161 uint32_t offset;
5162 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5163
5164 smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));
5165
5166 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
5167
5168 smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));
5169
5170 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
5171 seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);
5172
5173
5174 offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
5175 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
5176 activity_percent += 0x80;
5177 activity_percent >>= 8;
5178
5179 seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
5180
5181}
5182
5183static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
5184{
5185 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5186 const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
5187 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5188 struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
5189 uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
5190 struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
5191 uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
5192 struct PP_Clocks min_clocks = {0};
5193 uint32_t i;
5194 struct cgs_display_info info = {0};
5195
5196 data->need_update_smu7_dpm_table = 0;
5197
5198 for (i = 0; i < psclk_table->count; i++) {
5199 if (sclk == psclk_table->dpm_levels[i].value)
5200 break;
5201 }
5202
5203 if (i >= psclk_table->count)
5204 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
5205 else {
5206 /* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/
5207 if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
5208 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
5209 }
5210
5211 for (i=0; i < pmclk_table->count; i++) {
5212 if (mclk == pmclk_table->dpm_levels[i].value)
5213 break;
5214 }
5215
5216 if (i >= pmclk_table->count)
5217 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
5218
5219 cgs_get_active_displays_info(hwmgr->device, &info);
5220
5221 if (data->display_timing.num_existing_displays != info.display_count)
5222 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
5223
5224 return 0;
5225}
5226
5227static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps)
5228{
5229 uint32_t i;
5230 uint32_t sclk, max_sclk = 0;
5231 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5232 struct tonga_dpm_table *pdpm_table = &data->dpm_table;
5233
5234 for (i = 0; i < hw_ps->performance_level_count; i++) {
5235 sclk = hw_ps->performance_levels[i].engine_clock;
5236 if (max_sclk < sclk)
5237 max_sclk = sclk;
5238 }
5239
5240 for (i = 0; i < pdpm_table->sclk_table.count; i++) {
5241 if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk)
5242 return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ?
5243 pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value :
5244 pdpm_table->pcie_speed_table.dpm_levels[i].value);
5245 }
5246
5247 return 0;
5248}
5249
/*
 * If the new power state needs a higher PCIe link speed than the current
 * one, try to raise the link via ACPI before the state change.  A speed
 * downgrade is only flagged (pspp_notify_required) here and performed
 * after the state change.
 */
static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
	const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);

	uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch(target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 request failed: remember Gen2 as the fallback
			 * and, unless we are already at Gen2, try it next. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through */
		default:
			/* All ACPI requests failed: record the speed the link
			 * actually runs at so it can be forced later. */
			data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
5289
/*
 * Freeze SCLK/MCLK DPM at the current level so the DPM tables can be
 * rewritten and re-uploaded safely; tonga_unfreeze_sclk_mclk_dpm() is
 * the counterpart.  Only domains flagged in need_update_smu7_dpm_table
 * (and not key-disabled) are frozen.
 */
static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	/* Nothing to rewrite, nothing to freeze. */
	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(
			true == tonga_is_dpm_running(hwmgr),
			"Trying to freeze SCLK DPM when DPM is disabled",
			);
		PP_ASSERT_WITH_CODE(
			0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel),
			"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
			return -1);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
			"Trying to freeze MCLK DPM when DPM is disabled",
			);
		PP_ASSERT_WITH_CODE(
			0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel),
			"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
			return -1);
	}

	return 0;
}
5326
5327static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
5328{
5329 int result = 0;
5330
5331 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5332 const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
5333 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5334 uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
5335 uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
5336 struct tonga_dpm_table *pdpm_table = &data->dpm_table;
5337
5338 struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
5339 uint32_t dpm_count, clock_percent;
5340 uint32_t i;
5341
5342 if (0 == data->need_update_smu7_dpm_table)
5343 return 0;
5344
5345 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
5346 pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
5347
5348 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
5349 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
5350 /* Need to do calculation based on the golden DPM table
5351 * as the Heatmap GPU Clock axis is also based on the default values
5352 */
5353 PP_ASSERT_WITH_CODE(
5354 (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
5355 "Divide by 0!",
5356 return -1);
5357 dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
5358 for (i = dpm_count; i > 1; i--) {
5359 if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
5360 clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
5361 pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
5362
5363 pdpm_table->sclk_table.dpm_levels[i].value =
5364 pgolden_dpm_table->sclk_table.dpm_levels[i].value +
5365 (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
5366
5367 } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
5368 clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
5369 pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
5370
5371 pdpm_table->sclk_table.dpm_levels[i].value =
5372 pgolden_dpm_table->sclk_table.dpm_levels[i].value -
5373 (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
5374 } else
5375 pdpm_table->sclk_table.dpm_levels[i].value =
5376 pgolden_dpm_table->sclk_table.dpm_levels[i].value;
5377 }
5378 }
5379 }
5380
5381 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
5382 pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;
5383
5384 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
5385 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
5386
5387 PP_ASSERT_WITH_CODE(
5388 (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
5389 "Divide by 0!",
5390 return -1);
5391 dpm_count = pdpm_table->mclk_table.count < 2? 0 : pdpm_table->mclk_table.count-2;
5392 for (i = dpm_count; i > 1; i--) {
5393 if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
5394 clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
5395 pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
5396
5397 pdpm_table->mclk_table.dpm_levels[i].value =
5398 pgolden_dpm_table->mclk_table.dpm_levels[i].value +
5399 (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
5400
5401 } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
5402 clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
5403 pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
5404
5405 pdpm_table->mclk_table.dpm_levels[i].value =
5406 pgolden_dpm_table->mclk_table.dpm_levels[i].value -
5407 (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
5408 } else
5409 pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
5410 }
5411 }
5412 }
5413
5414 if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
5415 result = tonga_populate_all_memory_levels(hwmgr);
5416 PP_ASSERT_WITH_CODE((0 == result),
5417 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
5418 return result);
5419 }
5420
5421 if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
5422 /*populate MCLK dpm table to SMU7 */
5423 result = tonga_populate_all_memory_levels(hwmgr);
5424 PP_ASSERT_WITH_CODE((0 == result),
5425 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
5426 return result);
5427 }
5428
5429 return result;
5430}
5431
5432static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
5433 struct tonga_single_dpm_table * pdpm_table,
5434 uint32_t low_limit, uint32_t high_limit)
5435{
5436 uint32_t i;
5437
5438 for (i = 0; i < pdpm_table->count; i++) {
5439 if ((pdpm_table->dpm_levels[i].value < low_limit) ||
5440 (pdpm_table->dpm_levels[i].value > high_limit))
5441 pdpm_table->dpm_levels[i].enabled = false;
5442 else
5443 pdpm_table->dpm_levels[i].enabled = true;
5444 }
5445 return 0;
5446}
5447
5448static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
5449{
5450 int result = 0;
5451 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5452 uint32_t high_limit_count;
5453
5454 PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
5455 "power state did not have any performance level",
5456 return -1);
5457
5458 high_limit_count = (1 == hw_state->performance_level_count) ? 0: 1;
5459
5460 tonga_trim_single_dpm_states(hwmgr,
5461 &(data->dpm_table.sclk_table),
5462 hw_state->performance_levels[0].engine_clock,
5463 hw_state->performance_levels[high_limit_count].engine_clock);
5464
5465 tonga_trim_single_dpm_states(hwmgr,
5466 &(data->dpm_table.mclk_table),
5467 hw_state->performance_levels[0].memory_clock,
5468 hw_state->performance_levels[high_limit_count].memory_clock);
5469
5470 return result;
5471}
5472
5473static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
5474{
5475 int result;
5476 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5477 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5478 const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
5479
5480 result = tonga_trim_dpm_states(hwmgr, tonga_ps);
5481 if (0 != result)
5482 return result;
5483
5484 data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
5485 data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
5486 data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
5487 if (data->uvd_enabled)
5488 data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
5489
5490 data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
5491
5492 return 0;
5493}
5494
5495int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
5496{
5497 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
5498 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable :
5499 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable);
5500}
5501
5502int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
5503{
5504 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
5505 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
5506 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
5507}
5508
/*
 * Program the UVD boot level into the SMC DPM table and enable or
 * disable UVD DPM.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param bgate  true when UVD is being power gated (DPM turned off).
 */
int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot from the highest multimedia dependency level. */
		data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1);
		mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
		/* Round the byte offset down to a dword boundary; UvdBootLevel
		 * occupies the top byte of that dword (see masks below). */
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0x00FFFFFF;
		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Without UVD DPM, or with a stable pstate, pin the enabled
		 * mask to the boot level only. */
		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_UVDDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
	}

	return tonga_enable_disable_uvd_dpm(hwmgr, !bgate);
}
5534
/*
 * Enable VCE DPM when the new state starts using VCE (EVCLK goes from 0
 * to non-zero) after programming the VCE boot level; disable VCE DPM
 * when VCE use stops (EVCLK returns to 0).
 */
int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
	const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);

	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* VCE becomes active with the new state. */
	if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) {
		/* Boot from the highest multimedia dependency level. */
		data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1);

		mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
		/* Round the byte offset down to a dword boundary; VceBootLevel
		 * occupies byte 2 of that dword (bits 16-23, see masks). */
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0xFF00FFFF;
		mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* With a stable pstate, pin the enabled mask to the boot level. */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_VCEDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.VceBootLevel));

		tonga_enable_disable_vce_dpm(hwmgr, true);
	} else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0)
		tonga_enable_disable_vce_dpm(hwmgr, false);

	return 0;
}
5567
5568static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
5569{
5570 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5571
5572 uint32_t address;
5573 int32_t result;
5574
5575 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
5576 return 0;
5577
5578
5579 memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters));
5580
5581 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
5582
5583 if(result != 0)
5584 return result;
5585
5586
5587 address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
5588
5589 return tonga_copy_bytes_to_smc(hwmgr->smumgr, address,
5590 (uint8_t *)&data->mc_reg_table.data[0],
5591 sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
5592 data->sram_end);
5593}
5594
5595static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
5596{
5597 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5598
5599 if (data->need_update_smu7_dpm_table &
5600 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
5601 return tonga_program_memory_timing_parameters(hwmgr);
5602
5603 return 0;
5604}
5605
5606static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5607{
5608 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5609
5610 if (0 == data->need_update_smu7_dpm_table)
5611 return 0;
5612
5613 if ((0 == data->sclk_dpm_key_disabled) &&
5614 (data->need_update_smu7_dpm_table &
5615 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5616
5617 PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
5618 "Trying to Unfreeze SCLK DPM when DPM is disabled",
5619 );
5620 PP_ASSERT_WITH_CODE(
5621 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5622 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
5623 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
5624 return -1);
5625 }
5626
5627 if ((0 == data->mclk_dpm_key_disabled) &&
5628 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
5629
5630 PP_ASSERT_WITH_CODE(
5631 true == tonga_is_dpm_running(hwmgr),
5632 "Trying to Unfreeze MCLK DPM when DPM is disabled",
5633 );
5634 PP_ASSERT_WITH_CODE(
5635 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5636 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
5637 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
5638 return -1);
5639 }
5640
5641 data->need_update_smu7_dpm_table = 0;
5642
5643 return 0;
5644}
5645
/*
 * After the state change, request the (lower) PCIe link speed via ACPI
 * if a downgrade was flagged (pspp_notify_required) or a request is
 * still pending.  Clears pcie_performance_request on exit.
 */
static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
	uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps);
	uint8_t request;

	if (data->pspp_notify_required ||
		data->pcie_performance_request) {
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		/* Skip the ACPI call for a Gen1 request when the link already
		 * runs above Gen1. */
		if(request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) {
			data->pcie_performance_request = false;
			return 0;
		}

		if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
			/* NOTE(review): printk without a KERN_ level or
			 * trailing newline -- consider pr_err(). */
			if (PP_PCIEGen2 == target_link_speed)
				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
			else
				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
		}
	}

	data->pcie_performance_request = false;
	return 0;
}
5679
/*
 * Top-level power-state transition sequence: find the DPM deltas,
 * (optionally) raise the PCIe link, freeze DPM, rewrite/upload the DPM
 * tables and masks, then unfreeze and apply the new enable masks.
 * Every step is still attempted after a failure; "result" records the
 * most recent error so the hardware is left as consistent as possible.
 */
static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;

	tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);

	/* Link-speed upgrades must happen before the state change. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
	}

	tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);

	tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);

	tmp_result = tonga_update_vce_dpm(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);

	tmp_result = tonga_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);

	tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);

	tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);

	tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);

	/* Link-speed downgrades happen after the state change. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
	}

	return result;
}
5726
/**
* Set maximum target operating fan output PWM
*
* @param hwmgr: the address of the powerplay hardware manager.
* @param us_max_fan_pwm: max operating fan PWM in percents
* @return 0 on success (or when HW access is blocked), -1 when the SMC
*         rejected the message.
*/
static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
{
	/* Cache the limit first; it is kept even when hardware access is
	 * currently blocked and the SMC message is skipped. */
	hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;

	if (phm_is_hw_access_blocked(hwmgr))
		return 0;

	return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
}
5743
5744int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
5745{
5746 uint32_t num_active_displays = 0;
5747 struct cgs_display_info info = {0};
5748 info.mode_info = NULL;
5749
5750 cgs_get_active_displays_info(hwmgr->device, &info);
5751
5752 num_active_displays = info.display_count;
5753
5754 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
5755 tonga_notify_smc_display_change(hwmgr, false);
5756 else
5757 tonga_notify_smc_display_change(hwmgr, true);
5758
5759 return 0;
5760}
5761
/**
* Programs the display gap
*
* Configures the display-gap control registers and the SMC soft
* registers (PreVBlankGap, VBlankTimeout) from the current display count
* and mode timings.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always OK
*/
int tonga_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	/* Ignore the display gap entirely when no display is active. */
	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	/* Fall back to 60 Hz when the mode reports no refresh rate. */
	if(0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* Active time: frame minus vblank, minus a 200 us safety margin. */
	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64);

	/* VBlankTimeout covers the remainder of the frame. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	if (num_active_displays == 1)
		tonga_notify_smc_display_change(hwmgr, true);

	return 0;
}
5811
/*
 * React to a display configuration change by reprogramming the display
 * gap.  to do: PhwTonga_CacUpdateDisplayConfiguration(pHwMgr);
 */
int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	tonga_program_display_gap(hwmgr);

	return 0;
}
5820
5821/**
5822* Set maximum target operating fan output RPM
5823*
5824* @param pHwMgr: the address of the powerplay hardware manager.
5825* @param usMaxFanRpm: max operating fan RPM value.
5826* @return The response that came from the SMC.
5827*/
5828static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5829{
5830 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm;
5831
5832 if (phm_is_hw_access_blocked(hwmgr))
5833 return 0;
5834
5835 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1);
5836}
5837
/*
 * Return the reference clock (xclk).  When TCLK is muxed onto XCLK the
 * fixed TCLK constant is returned; otherwise the value comes from the
 * ATOM firmware info table, quartered when an XTALIN divider is set.
 * Returns 0 when the firmware table cannot be read.
 */
uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
{
	uint32_t reference_clock;
	uint32_t tc;
	uint32_t divide;

	ATOM_FIRMWARE_INFO *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);

	if (tc)
		return TCLK;

	fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
						&size, &frev, &crev);

	if (!fw_info)
		return 0;

	/* NOTE(review): reads usMinPixelClockPLL_Output as the reference
	 * clock -- confirm against the ATOM_FIRMWARE_INFO definition
	 * (usReferenceClock would be the expected field). */
	reference_clock = le16_to_cpu(fw_info->usMinPixelClockPLL_Output);

	divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);

	/* A non-zero XTALIN divider quarters the reference clock. */
	if (0 != divide)
		return reference_clock / 4;

	return reference_clock;
}
5869
/*
 * IRQ-source state callback: toggle the internal thermal interrupt mask
 * bits for the low-to-high and high-to-low temperature crossings.
 *
 * NOTE(review): "enabled" SETS the *_MASK bit and disabled clears it,
 * and the CG_THERMAL_INT_CTRL mask macros are applied to the
 * ixCG_THERMAL_INT register -- confirm this matches the intended
 * register layout and mask polarity.
 */
int tonga_dpm_set_interrupt_state(void *private_data,
					unsigned src_id, unsigned type,
					int enabled)
{
	uint32_t cg_thermal_int;
	struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (type) {
	case AMD_THERMAL_IRQ_LOW_TO_HIGH:
		if (enabled) {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		} else {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		}
		break;

	case AMD_THERMAL_IRQ_HIGH_TO_LOW:
		if (enabled) {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		} else {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		}
		break;
	default:
		break;
	}
	return 0;
}
5909
5910int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
5911 const void *thermal_interrupt_info)
5912{
5913 int result;
5914 const struct pp_interrupt_registration_info *info =
5915 (const struct pp_interrupt_registration_info *)thermal_interrupt_info;
5916
5917 if (info == NULL)
5918 return -EINVAL;
5919
5920 result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
5921 tonga_dpm_set_interrupt_state,
5922 info->call_back, info->context);
5923
5924 if (result)
5925 return -EINVAL;
5926
5927 result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
5928 tonga_dpm_set_interrupt_state,
5929 info->call_back, info->context);
5930
5931 if (result)
5932 return -EINVAL;
5933
5934 return 0;
5935}
5936
5937bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
5938{
5939 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5940 bool is_update_required = false;
5941 struct cgs_display_info info = {0,0,NULL};
5942
5943 cgs_get_active_displays_info(hwmgr->device, &info);
5944
5945 if (data->display_timing.num_existing_displays != info.display_count)
5946 is_update_required = true;
5947/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
5948 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
5949 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
5950 if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
5951 is_update_required = true;
5952*/
5953 return is_update_required;
5954}
5955
5956static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1,
5957 const struct tonga_performance_level *pl2)
5958{
5959 return ((pl1->memory_clock == pl2->memory_clock) &&
5960 (pl1->engine_clock == pl2->engine_clock) &&
5961 (pl1->pcie_gen == pl2->pcie_gen) &&
5962 (pl1->pcie_lane == pl2->pcie_lane));
5963}
5964
5965int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
5966{
5967 const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1);
5968 const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2);
5969 int i;
5970
5971 if (equal == NULL || psa == NULL || psb == NULL)
5972 return -EINVAL;
5973
5974 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
5975 if (psa->performance_level_count != psb->performance_level_count) {
5976 *equal = false;
5977 return 0;
5978 }
5979
5980 for (i = 0; i < psa->performance_level_count; i++) {
5981 if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
5982 /* If we have found even one performance level pair that is different the states are different. */
5983 *equal = false;
5984 return 0;
5985 }
5986 }
5987
5988 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
5989 *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
5990 *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
5991 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
5992 *equal &= (psa->acp_clk == psb->acp_clk);
5993
5994 return 0;
5995}
5996
5997static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5998{
5999 if (mode) {
6000 /* stop auto-manage */
6001 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
6002 PHM_PlatformCaps_MicrocodeFanControl))
6003 tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
6004 tonga_fan_ctrl_set_static_mode(hwmgr, mode);
6005 } else
6006 /* restart auto-manage */
6007 tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr);
6008
6009 return 0;
6010}
6011
6012static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
6013{
6014 if (hwmgr->fan_ctrl_is_in_default_mode)
6015 return hwmgr->fan_ctrl_default_mode;
6016 else
6017 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
6018 CG_FDO_CTRL2, FDO_PWM_MODE);
6019}
6020
6021static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
6022 .backend_init = &tonga_hwmgr_backend_init,
6023 .backend_fini = &tonga_hwmgr_backend_fini,
6024 .asic_setup = &tonga_setup_asic_task,
6025 .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
6026 .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
6027 .force_dpm_level = &tonga_force_dpm_level,
6028 .power_state_set = tonga_set_power_state_tasks,
6029 .get_power_state_size = tonga_get_power_state_size,
6030 .get_mclk = tonga_dpm_get_mclk,
6031 .get_sclk = tonga_dpm_get_sclk,
6032 .patch_boot_state = tonga_dpm_patch_boot_state,
6033 .get_pp_table_entry = tonga_get_pp_table_entry,
6034 .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
6035 .print_current_perforce_level = tonga_print_current_perforce_level,
6036 .powerdown_uvd = tonga_phm_powerdown_uvd,
6037 .powergate_uvd = tonga_phm_powergate_uvd,
6038 .powergate_vce = tonga_phm_powergate_vce,
6039 .disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
6040 .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
6041 .display_config_changed = tonga_display_configuration_changed_task,
6042 .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
6043 .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output,
6044 .get_temperature = tonga_thermal_get_temperature,
6045 .stop_thermal_controller = tonga_thermal_stop_thermal_controller,
6046 .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info,
6047 .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent,
6048 .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent,
6049 .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default,
6050 .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm,
6051 .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm,
6052 .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller,
6053 .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt,
6054 .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration,
6055 .check_states_equal = tonga_check_states_equal,
6056 .set_fan_control_mode = tonga_set_fan_control_mode,
6057 .get_fan_control_mode = tonga_get_fan_control_mode,
6058};
6059
6060int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
6061{
6062 tonga_hwmgr *data;
6063
6064 data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL);
6065 if (data == NULL)
6066 return -ENOMEM;
6067 memset(data, 0x00, sizeof(tonga_hwmgr));
6068
6069 hwmgr->backend = data;
6070 hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
6071 hwmgr->pptable_func = &tonga_pptable_funcs;
6072 pp_tonga_thermal_initialize(hwmgr);
6073 return 0;
6074}
6075
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
new file mode 100644
index 000000000000..49168d262ccc
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -0,0 +1,408 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef TONGA_HWMGR_H
24#define TONGA_HWMGR_H
25
26#include "hwmgr.h"
27#include "smu72_discrete.h"
28#include "ppatomctrl.h"
29#include "ppinterrupt.h"
30#include "tonga_powertune.h"
31
32#define TONGA_MAX_HARDWARE_POWERLEVELS 2
33#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
34
/* One DPM performance level: engine/memory clocks plus the PCIe link config. */
struct tonga_performance_level {
	uint32_t  memory_clock;	/* MCLK for this level */
	uint32_t  engine_clock;	/* SCLK for this level */
	uint16_t  pcie_gen;	/* PCIe generation */
	uint16_t  pcie_lane;	/* PCIe lane count */
};
41
/* BACO configuration plus the performance level used while in it. */
struct _phw_tonga_bacos {
	uint32_t	best_match;
	uint32_t	baco_flags;
	struct tonga_performance_level	performance_level;
};
typedef struct _phw_tonga_bacos phw_tonga_bacos;

/* UVD clock pair. */
struct _phw_tonga_uvd_clocks {
	uint32_t	VCLK;
	uint32_t	DCLK;
};

typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;

/* VCE clock pair. */
struct _phw_tonga_vce_clocks {
	uint32_t	EVCLK;
	uint32_t	ECCLK;
};

typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
62
/* A complete Tonga power state as handled by the hwmgr. */
struct tonga_power_state {
	uint32_t		magic;
	phw_tonga_uvd_clocks	uvd_clocks;
	phw_tonga_vce_clocks	vce_clocks;
	uint32_t		sam_clk;
	uint32_t		acp_clk;
	uint16_t		performance_level_count;	/* valid entries in performance_levels[] */
	bool			dc_compatible;
	uint32_t		sclk_threshold;
	struct tonga_performance_level	performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
};
74
/* One entry of a single-domain DPM table. */
struct _phw_tonga_dpm_level {
	bool		enabled;
	uint32_t	value;
	uint32_t	param1;
};
typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;

#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define TONGA_MINIMUM_ENGINE_CLOCK 2500

/* DPM levels for one clock/voltage domain; only the first 'count' entries are used. */
struct tonga_single_dpm_table {
	uint32_t		count;
	phw_tonga_dpm_level	dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
90
91struct tonga_dpm_table {
92 struct tonga_single_dpm_table sclk_table;
93 struct tonga_single_dpm_table mclk_table;
94 struct tonga_single_dpm_table pcie_speed_table;
95 struct tonga_single_dpm_table vddc_table;
96 struct tonga_single_dpm_table vdd_gfx_table;
97 struct tonga_single_dpm_table vdd_ci_table;
98 struct tonga_single_dpm_table mvdd_table;
99};
100typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table;
101
102
/* Cached values of clock-programming registers (the 'v' prefix marks a cached
 * register value — presumably captured at init; confirm against the readers).
 * NOTE(review): the tag misspells "registers" as "regisiters"; renaming it
 * would change the type's identity, so it is left as-is.
 */
struct _phw_tonga_clock_regisiters {
	uint32_t  vCG_SPLL_FUNC_CNTL;
	uint32_t  vCG_SPLL_FUNC_CNTL_2;
	uint32_t  vCG_SPLL_FUNC_CNTL_3;
	uint32_t  vCG_SPLL_FUNC_CNTL_4;
	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t  vDLL_CNTL;
	uint32_t  vMCLK_PWRMGT_CNTL;
	uint32_t  vMPLL_AD_FUNC_CNTL;
	uint32_t  vMPLL_DQ_FUNC_CNTL;
	uint32_t  vMPLL_FUNC_CNTL;
	uint32_t  vMPLL_FUNC_CNTL_1;
	uint32_t  vMPLL_FUNC_CNTL_2;
	uint32_t  vMPLL_SS1;
	uint32_t  vMPLL_SS2;
};
typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers;
121
/* Cached SMIO voltage-control register value. */
struct _phw_tonga_voltage_smio_registers {
	uint32_t vs0_vid_lower_smio_cntl;
};
typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
126
127
/* MC register values valid up to a given memory clock. */
struct _phw_tonga_mc_reg_entry {
	uint32_t mclk_max;	/* highest MCLK this entry applies to */
	uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;

/* Per-MCLK-range MC register programming table. */
struct _phw_tonga_mc_reg_table {
	uint8_t   last;               /* number of registers*/
	uint8_t   num_entries;        /* number of entries in mc_reg_table_entry used*/
	uint16_t  validflag;          /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
	phw_tonga_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
	SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;

#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2
145
/* Ultra Low Voltage (ULV) parameter structure. */
struct _phw_tonga_ulv_parm {
	bool	ulv_supported;
	uint32_t  ch_ulv_parameter;
	uint32_t  ulv_volt_change_delay;
	struct tonga_performance_level  ulv_power_level;	/* level used while in ULV */
};
typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
154
#define TONGA_MAX_LEAKAGE_COUNT 8

/* Map of leakage IDs to the actual voltages they stand for;
 * only the first 'count' slots are valid.
 */
struct _phw_tonga_leakage_voltage {
	uint16_t  count;
	uint16_t  leakage_id[TONGA_MAX_LEAKAGE_COUNT];
	uint16_t  actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
};
typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
163
/* Display configuration snapshot used to detect when an SMC update is needed
 * (compared against live CGS data in
 * tonga_check_smc_update_required_for_display_configuration()).
 */
struct _phw_tonga_display_timing {
	uint32_t min_clock_insr;
	uint32_t num_existing_displays;
};
typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
169
/* Per-block bitmasks of enabled DPM levels. */
struct _phw_tonga_dpmlevel_enable_mask {
	uint32_t  uvd_dpm_enable_mask;
	uint32_t  vce_dpm_enable_mask;
	uint32_t  acp_dpm_enable_mask;
	uint32_t  samu_dpm_enable_mask;
	uint32_t  sclk_dpm_enable_mask;
	uint32_t  mclk_dpm_enable_mask;
	uint32_t  pcie_dpm_enable_mask;
};
typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;

/* Inclusive min/max range of PCIe levels. */
struct _phw_tonga_pcie_perf_range {
	uint16_t  max;
	uint16_t  min;
};
typedef struct  _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
186
/* Voltages, clocks and PCIe settings as left by the VBIOS at boot. */
struct _phw_tonga_vbios_boot_state {
	uint16_t    mvdd_bootup_value;
	uint16_t    vddc_bootup_value;
	uint16_t    vddci_bootup_value;
	uint16_t    vddgfx_bootup_value;
	uint32_t    sclk_bootup_value;
	uint32_t    mclk_bootup_value;
	uint16_t    pcie_gen_bootup_value;
	uint16_t    pcie_lane_bootup_value;
};
typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;

/* Flags for need_update_smu7_dpm_table: which DPM tables must be rewritten. */
#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
#define DPMTABLE_UPDATE_SCLK        0x00000004
#define DPMTABLE_UPDATE_MCLK        0x00000008
203
/* We need to review which fields are needed. */
/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
/* Per-device backend state of the Tonga hwmgr (hung off pp_hwmgr::backend). */
struct tonga_hwmgr {
	struct tonga_dpm_table             dpm_table;	/* current DPM tables */
	struct tonga_dpm_table             golden_dpm_table;	/* pristine copy for restoring defaults */

	uint32_t                           voting_rights_clients0;
	uint32_t                           voting_rights_clients1;
	uint32_t                           voting_rights_clients2;
	uint32_t                           voting_rights_clients3;
	uint32_t                           voting_rights_clients4;
	uint32_t                           voting_rights_clients5;
	uint32_t                           voting_rights_clients6;
	uint32_t                           voting_rights_clients7;
	uint32_t                           static_screen_threshold_unit;
	uint32_t                           static_screen_threshold;
	uint32_t                           voltage_control;
	uint32_t                           vdd_gfx_control;

	uint32_t                           vddc_vddci_delta;
	uint32_t                           vddc_vddgfx_delta;

	struct pp_interrupt_registration_info    internal_high_thermal_interrupt_info;
	struct pp_interrupt_registration_info    internal_low_thermal_interrupt_info;
	struct pp_interrupt_registration_info    smc_to_host_interrupt_info;
	uint32_t                          active_auto_throttle_sources;

	struct pp_interrupt_registration_info    external_throttle_interrupt;
	irq_handler_func_t             external_throttle_callback;
	void                             *external_throttle_context;

	struct pp_interrupt_registration_info    ctf_interrupt_info;
	irq_handler_func_t             ctf_callback;
	void                             *ctf_context;

	phw_tonga_clock_registers         clock_registers;
	phw_tonga_voltage_smio_registers  voltage_smio_registers;

	bool                           is_memory_GDDR5;
	uint16_t                          acpi_vddc;
	bool                           pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
	uint16_t                          force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
	uint16_t                          acpi_pcie_gen; /* The PCI-E speed at ACPI time */
	uint32_t                           pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
	uint32_t                           pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
	uint32_t                           pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
	phw_tonga_leakage_voltage       vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
	phw_tonga_leakage_voltage       vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
	phw_tonga_leakage_voltage       vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */

	uint32_t                           mvdd_control;
	uint32_t                           vddc_mask_low;
	uint32_t                           mvdd_mask_low;
	uint16_t                          max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
	uint16_t                          min_vddc_in_pp_table;
	uint16_t                          max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
	uint16_t                          min_vddci_in_pp_table;
	uint32_t                           mclk_strobe_mode_threshold;
	uint32_t                           mclk_stutter_mode_threshold;
	uint32_t                           mclk_edc_enable_threshold;
	uint32_t                           mclk_edc_wr_enable_threshold;
	bool                           is_uvd_enabled;
	bool                           is_xdma_enabled;
	phw_tonga_vbios_boot_state      vbios_boot_state;

	bool                           battery_state;
	bool                           is_tlu_enabled;
	bool                           pcie_performance_request;

	/* -------------- SMC SRAM Address of firmware header tables ----------------*/
	uint32_t                           sram_end; /* The first address after the SMC SRAM. */
	uint32_t                           dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
	uint32_t                           soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
	uint32_t                           mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
	uint32_t                           fan_table_start; /* The start of the fan table in the SMC SRAM. */
	uint32_t                           arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
	SMU72_Discrete_DpmTable         smc_state_table; /* The carbon copy of the SMC state table. */
	SMU72_Discrete_MCRegisters      mc_reg_table;
	SMU72_Discrete_Ulv              ulv_setting; /* The carbon copy of ULV setting. */
	/* -------------- Stuff originally coming from Evergreen --------------------*/
	phw_tonga_mc_reg_table			tonga_mc_reg_table;
	uint32_t                         vdd_ci_control;
	pp_atomctrl_voltage_table        vddc_voltage_table;
	pp_atomctrl_voltage_table        vddci_voltage_table;
	pp_atomctrl_voltage_table        vddgfx_voltage_table;
	pp_atomctrl_voltage_table        mvdd_voltage_table;

	uint32_t                           mgcg_cgtt_local2;
	uint32_t                           mgcg_cgtt_local3;
	uint32_t                           gpio_debug;
	uint32_t                           mc_micro_code_feature;
	uint32_t                           highest_mclk;
	uint16_t                          acpi_vdd_ci;
	uint8_t                           mvdd_high_index;
	uint8_t                           mvdd_low_index;
	bool                           dll_defaule_on; /* NOTE(review): name typo "defaule" kept — renaming would break users */
	bool                           performance_request_registered;

	/* ----------------- Low Power Features ---------------------*/
	phw_tonga_bacos                 bacos;
	phw_tonga_ulv_parm              ulv;
	/* ----------------- CAC Stuff ---------------------*/
	uint32_t                       cac_table_start;
	bool                           cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
	bool                           driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
	bool                           cac_enabled;
	/* ----------------- DPM2 Parameters ---------------------*/
	uint32_t                       power_containment_features;
	bool                           enable_bapm_feature;
	bool                           enable_tdc_limit_feature;
	bool                           enable_pkg_pwr_tracking_feature;
	bool                           disable_uvd_power_tune_feature;
	phw_tonga_pt_defaults           *power_tune_defaults;
	SMU72_Discrete_PmFuses           power_tune_table;
	uint32_t                           ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
	uint32_t                           fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */

	/* ----------------- Phase Shedding ---------------------*/
	bool                           vddc_phase_shed_control;
	/* --------------------- DI/DT --------------------------*/
	phw_tonga_display_timing       display_timing;
	/* --------- ReadRegistry data for memory and engine clock margins ---- */
	uint32_t                           engine_clock_data;
	uint32_t                           memory_clock_data;
	/* -------- Thermal Temperature Setting --------------*/
	phw_tonga_dpmlevel_enable_mask     dpm_level_enable_mask;
	uint32_t                           need_update_smu7_dpm_table;	/* DPMTABLE_* flags */
	uint32_t                           sclk_dpm_key_disabled;
	uint32_t                           mclk_dpm_key_disabled;
	uint32_t                           pcie_dpm_key_disabled;
	uint32_t                           min_engine_clocks; /* used to store the previous dal min sclock */
	phw_tonga_pcie_perf_range       pcie_gen_performance;
	phw_tonga_pcie_perf_range       pcie_lane_performance;
	phw_tonga_pcie_perf_range       pcie_gen_power_saving;
	phw_tonga_pcie_perf_range       pcie_lane_power_saving;
	bool                           use_pcie_performance_levels;
	bool                           use_pcie_power_saving_levels;
	uint32_t                           activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
	uint32_t                           mclk_activity_target;
	uint32_t                           low_sclk_interrupt_threshold;
	uint32_t                           last_mclk_dpm_enable_mask;
	bool                           uvd_enabled;
	uint32_t                           pcc_monitor_enabled;

	/* --------- Power Gating States ------------*/
	bool                           uvd_power_gated;  /* 1: gated, 0:not gated */
	bool                           vce_power_gated;  /* 1: gated, 0:not gated */
	bool                           samu_power_gated; /* 1: gated, 0:not gated */
	bool                           acp_power_gated;  /* 1: gated, 0:not gated */
	bool                           pg_acp_init;

};

typedef struct tonga_hwmgr tonga_hwmgr;
358
/* DPM2 / power-containment tuning constants. */
#define TONGA_DPM2_NEAR_TDP_DEC          10
#define TONGA_DPM2_ABOVE_SAFE_INC        5
#define TONGA_DPM2_BELOW_SAFE_INC        20

#define TONGA_DPM2_LTA_WINDOW_SIZE       7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. */

#define TONGA_DPM2_LTS_TRUNCATE          0

#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */

#define TONGA_DPM2_MAXPS_PERCENT_H       90 /* Maximum 0xFF */
#define TONGA_DPM2_MAXPS_PERCENT_M       90 /* Maximum 0xFF */

#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50

/* SQ power-ramp register field values. */
#define TONGA_DPM2_SQ_RAMP_MAX_POWER         0x3FFF
#define TONGA_DPM2_SQ_RAMP_MIN_POWER         0x12
#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA   0x15
#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF

/* Values for tonga_hwmgr::voltage_control and friends. */
#define TONGA_VOLTAGE_CONTROL_NONE         0x0
#define TONGA_VOLTAGE_CONTROL_BY_GPIO      0x1
#define TONGA_VOLTAGE_CONTROL_BY_SVID2     0x2
#define TONGA_VOLTAGE_CONTROL_MERGED       0x3

#define TONGA_Q88_FORMAT_CONVERSION_UNIT   256 /*To convert to Q8.8 format for firmware */

#define TONGA_UNUSED_GPIO_PIN 0x7F

/* The SMC stores values big-endian; convert to/from host byte order. */
#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)

#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)

/* In-place byte-order conversion of an lvalue. */
#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))

#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))

int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
406
407#endif
408
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
new file mode 100644
index 000000000000..8e6670b3cb67
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_POWERTUNE_H
25#define TONGA_POWERTUNE_H
26
/* Address space a PowerTune config-register entry refers to. */
enum _phw_tonga_ptc_config_reg_type {
	TONGA_CONFIGREG_MMR = 0,	/* memory-mapped register */
	TONGA_CONFIGREG_SMC_IND,	/* SMC indirect register */
	TONGA_CONFIGREG_DIDT_IND,	/* DIDT indirect register */
	TONGA_CONFIGREG_CACHE,		/* cached value, not a HW register */

	TONGA_CONFIGREG_MAX
};
typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
36
/* PowerContainment Features (bits of a power_containment_features mask). */
#define POWERCONTAINMENT_FEATURE_BAPM            0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004

/* One masked read-modify-write of a config register field. */
struct _phw_tonga_pt_config_reg {
	uint32_t                           Offset;
	uint32_t                           Mask;
	uint32_t                           Shift;
	uint32_t                           Value;
	phw_tonga_ptc_config_reg_type     Type;	/* which address space Offset lives in */
};
typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg;
50
/* PowerTune default fuse values and BAPM coefficient tables. */
struct _phw_tonga_pt_defaults {
	uint8_t   svi_load_line_en;
	uint8_t   svi_load_line_vddC;
	uint8_t   tdc_vddc_throttle_release_limit_perc;
	uint8_t   tdc_mawt;
	uint8_t   tdc_waterfall_ctl;
	uint8_t   dte_ambient_temp_base;
	uint32_t  display_cac;
	uint32_t  bamp_temp_gradient;
	uint16_t  bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
	uint16_t  bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
};
typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults;
64
65#endif
66
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
new file mode 100644
index 000000000000..9a4456e6521b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
@@ -0,0 +1,406 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_PPTABLE_H
25#define TONGA_PPTABLE_H
26
27/** \file
28 * This is a PowerPlay table header file
29 */
30#pragma pack(push, 1)
31
32#include "hwmgr.h"
33
34#define ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
35#define ATOM_TONGA_PP_FANPARAMETERS_NOFAN 0x80 /* No fan is connected to this controller. */
36
37#define ATOM_TONGA_PP_THERMALCONTROLLER_NONE 0
38#define ATOM_TONGA_PP_THERMALCONTROLLER_LM96163 17
39#define ATOM_TONGA_PP_THERMALCONTROLLER_TONGA 21
40#define ATOM_TONGA_PP_THERMALCONTROLLER_FIJI 22
41
42/*
43 * Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
44 * We probably should reserve the bit 0x80 for this use.
45 * To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
46 * The driver can pick the correct internal controller based on the ASIC.
47 */
48
49#define ATOM_TONGA_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 /* ADT7473 Fan Control + Internal Thermal Controller */
50#define ATOM_TONGA_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D /* EMC2103 Fan Control + Internal Thermal Controller */
51
/* ATOM_TONGA_POWERPLAYTABLE::ulPlatformCaps */
53#define ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL 0x1 /* This cap indicates whether vddgfx will be a separated power rail. */
54#define ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY 0x2 /* This cap indicates whether this is a mobile part and CCC need to show Powerplay page. */
55#define ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x4 /* This cap indicates whether power source notificaiton is done by SBIOS directly. */
56#define ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND 0x8 /* Enable the option to overwrite voltage island feature to be disabled, regardless of VddGfx power rail support. */
57#define ____RETIRE16____ 0x10
58#define ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC 0x20 /* This cap indicates whether power source notificaiton is done by GPIO directly. */
59#define ____RETIRE64____ 0x40
60#define ____RETIRE128____ 0x80
61#define ____RETIRE256____ 0x100
62#define ____RETIRE512____ 0x200
63#define ____RETIRE1024____ 0x400
64#define ____RETIRE2048____ 0x800
65#define ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL 0x1000 /* This cap indicates dynamic MVDD is required. Uncheck to disable it. */
66#define ____RETIRE2000____ 0x2000
67#define ____RETIRE4000____ 0x4000
68#define ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 /* This cap indicates dynamic VDDCI is required. Uncheck to disable it. */
69#define ____RETIRE10000____ 0x10000
70#define ATOM_TONGA_PP_PLATFORM_CAP_BACO 0x20000 /* Enable to indicate the driver supports BACO state. */
71
72#define ATOM_TONGA_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x100000 /* Enable to indicate the driver supports thermal2GPIO17. */
73#define ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x1000000 /* Enable to indicate if thermal and PCC are sharing the same GPIO */
74#define ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE 0x2000000
75
76/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */
77#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
78#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
79#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
80#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
81#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
82#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
83/* 2, 4, 6, 7 are reserved */
84
85#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
86#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
87#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
88#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
89#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
90#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
91
92/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */
93#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
94
95#define ATOM_Tonga_DISALLOW_ON_DC 0x00004000
96#define ATOM_Tonga_ENABLE_VARIBRIGHT 0x00008000
97
98#define ATOM_Tonga_TABLE_REVISION_TONGA 7
99
/* Root PowerPlay table parsed from the VBIOS; the us*Offset fields locate
 * the sub-tables (presumably relative to the start of this structure —
 * confirm against the parser).
 */
typedef struct _ATOM_Tonga_POWERPLAYTABLE {
	ATOM_COMMON_TABLE_HEADER sHeader;

	UCHAR  ucTableRevision;
	USHORT usTableSize;						/*the size of header structure */

	ULONG	ulGoldenPPID;
	ULONG	ulGoldenRevision;
	USHORT	usFormatID;

	USHORT	usVoltageTime;					 /*in microseconds */
	ULONG	ulPlatformCaps;					  /*See ATOM_Tonga_CAPS_* */

	ULONG	ulMaxODEngineClock; 			   /*For Overdrive.  */
	ULONG	ulMaxODMemoryClock; 			   /*For Overdrive. */

	USHORT	usPowerControlLimit;
	USHORT	usUlvVoltageOffset;				  /*in mv units */

	USHORT	usStateArrayOffset;				  /*points to ATOM_Tonga_State_Array */
	USHORT	usFanTableOffset;				  /*points to ATOM_Tonga_Fan_Table */
	USHORT	usThermalControllerOffset;		   /*points to ATOM_Tonga_Thermal_Controller */
	USHORT	usReserv;						   /*CustomThermalPolicy removed for Tonga. Keep this filed as reserved. */

	USHORT	usMclkDependencyTableOffset;	   /*points to ATOM_Tonga_MCLK_Dependency_Table */
	USHORT	usSclkDependencyTableOffset;	   /*points to ATOM_Tonga_SCLK_Dependency_Table */
	USHORT	usVddcLookupTableOffset;		   /*points to ATOM_Tonga_Voltage_Lookup_Table */
	USHORT	usVddgfxLookupTableOffset; 		/*points to ATOM_Tonga_Voltage_Lookup_Table */

	USHORT	usMMDependencyTableOffset;		  /*points to ATOM_Tonga_MM_Dependency_Table */

	USHORT	usVCEStateTableOffset;			   /*points to ATOM_Tonga_VCE_State_Table; */

	USHORT	usPPMTableOffset;				  /*points to ATOM_Tonga_PPM_Table */
	USHORT	usPowerTuneTableOffset;			  /*points to ATOM_PowerTune_Table */

	USHORT	usHardLimitTableOffset; 		   /*points to ATOM_Tonga_Hard_Limit_Table */

	USHORT	usPCIETableOffset;				  /*points to ATOM_Tonga_PCIE_Table */

	USHORT	usGPIOTableOffset;				  /*points to ATOM_Tonga_GPIO_Table */

	USHORT	usReserved[6];					   /*TODO: modify reserved size to fit structure aligning */
} ATOM_Tonga_POWERPLAYTABLE;
144
/* One VBIOS power state: indices into the clock dependency tables plus
 * PCIe range and classification flags.
 */
typedef struct _ATOM_Tonga_State {
	UCHAR  ucEngineClockIndexHigh;
	UCHAR  ucEngineClockIndexLow;

	UCHAR  ucMemoryClockIndexHigh;
	UCHAR  ucMemoryClockIndexLow;

	UCHAR  ucPCIEGenLow;
	UCHAR  ucPCIEGenHigh;

	UCHAR  ucPCIELaneLow;
	UCHAR  ucPCIELaneHigh;

	USHORT usClassification;
	ULONG ulCapsAndSettings;
	USHORT usClassification2;
	UCHAR  ucUnused[4];
} ATOM_Tonga_State;

/* Variable-length array of states; ucNumEntries gives the real count. */
typedef struct _ATOM_Tonga_State_Array {
	UCHAR ucRevId;
	UCHAR ucNumEntries;		/* Number of entries. */
	ATOM_Tonga_State states[1];	/* Dynamically allocate entries. */
} ATOM_Tonga_State_Array;
169
/* Voltages required for one memory clock level. */
typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
	UCHAR  ucVddcInd;	/* Vddc voltage */
	USHORT usVddci;
	USHORT usVddgfxOffset;	/* Offset relative to Vddc voltage */
	USHORT usMvdd;
	ULONG ulMclk;
	USHORT usReserved;
} ATOM_Tonga_MCLK_Dependency_Record;

/* Variable-length MCLK dependency table; ucNumEntries gives the real count. */
typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
	UCHAR ucRevId;
	UCHAR ucNumEntries;					/* Number of entries. */
	ATOM_Tonga_MCLK_Dependency_Record entries[1]; 		/* Dynamically allocate entries. */
} ATOM_Tonga_MCLK_Dependency_Table;
184
/* Voltage and limits for one engine clock level. */
typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
	UCHAR  ucVddInd;		/* Base voltage */
	USHORT usVddcOffset;		/* Offset relative to base voltage */
	ULONG ulSclk;
	USHORT usEdcCurrent;
	UCHAR  ucReliabilityTemperature;
	UCHAR  ucCKSVOffsetandDisable;	/* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
} ATOM_Tonga_SCLK_Dependency_Record;

/* Variable-length SCLK dependency table; ucNumEntries gives the real count. */
typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
	UCHAR ucRevId;
	UCHAR ucNumEntries; 					/* Number of entries. */
	ATOM_Tonga_SCLK_Dependency_Record entries[1];		 /* Dynamically allocate entries. */
} ATOM_Tonga_SCLK_Dependency_Table;
199
/* One PCIe link configuration level; parsed by get_pcie_table(). */
typedef struct _ATOM_Tonga_PCIE_Record {
	UCHAR ucPCIEGenSpeed;
	UCHAR usPCIELaneWidth;	/* NOTE(review): "us" prefix is misleading — this is a UCHAR; renaming would break existing users, so documented instead */
	UCHAR ucReserved[2];
} ATOM_Tonga_PCIE_Record;
205
/* Header + variable-length array of PCIe levels (entries[1] idiom;
 * real count in ucNumEntries). */
typedef struct _ATOM_Tonga_PCIE_Table {
	UCHAR ucRevId;
	UCHAR ucNumEntries;						/* Number of entries. */
	ATOM_Tonga_PCIE_Record entries[1];				/* Dynamically allocate entries. */
} ATOM_Tonga_PCIE_Table;
211
/* One multimedia-engine clock level (UVD/VCE/ACP/SAMU) and its voltage;
 * parsed by get_mm_clock_voltage_table(). */
typedef struct _ATOM_Tonga_MM_Dependency_Record {
	UCHAR ucVddcInd;	/* VDDC voltage (index into the VDDC lookup table) */
	USHORT usVddgfxOffset;	/* Offset relative to VDDC voltage */
	ULONG ulDClk;		/* UVD D-clock */
	ULONG ulVClk;		/* UVD V-clock */
	ULONG ulEClk;		/* VCE clock */
	ULONG ulAClk;		/* ACP clock */
	ULONG ulSAMUClk;	/* SAMU clock */
} ATOM_Tonga_MM_Dependency_Record;
221
/* Header + variable-length array of multimedia clock levels
 * (entries[1] idiom; real count in ucNumEntries). */
typedef struct _ATOM_Tonga_MM_Dependency_Table {
	UCHAR ucRevId;
	UCHAR ucNumEntries;						/* Number of entries. */
	ATOM_Tonga_MM_Dependency_Record entries[1];			/* Dynamically allocate entries. */
} ATOM_Tonga_MM_Dependency_Table;
227
/* One voltage level with its CAC (leakage) characterization values;
 * parsed by get_vddc_lookup_table(). */
typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
	USHORT usVdd;		/* Base voltage */
	USHORT usCACLow;
	USHORT usCACMid;
	USHORT usCACHigh;
} ATOM_Tonga_Voltage_Lookup_Record;
234
/* Header + variable-length array of voltage/CAC records
 * (entries[1] idiom; real count in ucNumEntries). */
typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
	UCHAR ucRevId;
	UCHAR ucNumEntries;						/* Number of entries. */
	ATOM_Tonga_Voltage_Lookup_Record entries[1];			/* Dynamically allocate entries. */
} ATOM_Tonga_Voltage_Lookup_Table;
240
/* Fan controller configuration as stored in the VBIOS PowerPlay table.
 * Temperatures are in 0.01 degrees C; PWM values in 0.01% increments. */
typedef struct _ATOM_Tonga_Fan_Table {
	UCHAR ucRevId;			 /* Change this if the table format changes or version changes so that the other fields are not the same. */
	UCHAR ucTHyst;			 /* Temperature hysteresis. Integer. */
	USHORT usTMin;			 /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */
	USHORT usTMed;			 /* The middle temperature where we change slopes. */
	USHORT usTHigh;			 /* The high point above TMed for adjusting the second slope. */
	USHORT usPWMMin;		 /* The minimum PWM value in percent (0.01% increments). */
	USHORT usPWMMed;		 /* The PWM value (in percent) at TMed. */
	USHORT usPWMHigh;		 /* The PWM value at THigh. */
	USHORT usTMax;			 /* The max temperature */
	UCHAR ucFanControlMode;		  /* Legacy or Fuzzy Fan mode */
	USHORT usFanPWMMax;		  /* Maximum allowed fan power in percent */
	USHORT usFanOutputSensitivity;	  /* Sensitivity of fan reaction to temperature changes */
	USHORT usFanRPMMax;		  /* The default value in RPM */
	ULONG ulMinFanSCLKAcousticLimit;   /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
	UCHAR ucTargetTemperature;	 /* Advanced fan controller target temperature. */
	UCHAR ucMinimumPWMLimit;	 /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. */
	USHORT usReserved;
} ATOM_Tonga_Fan_Table;
260
/* Fiji variant of the fan table: identical field layout to
 * ATOM_Tonga_Fan_Table up through ucMinimumPWMLimit, with per-sensor
 * fan-gain fields appended before the reserved tail. */
typedef struct _ATOM_Fiji_Fan_Table {
	UCHAR ucRevId;			 /* Change this if the table format changes or version changes so that the other fields are not the same. */
	UCHAR ucTHyst;			 /* Temperature hysteresis. Integer. */
	USHORT usTMin;			 /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */
	USHORT usTMed;			 /* The middle temperature where we change slopes. */
	USHORT usTHigh;			 /* The high point above TMed for adjusting the second slope. */
	USHORT usPWMMin;		 /* The minimum PWM value in percent (0.01% increments). */
	USHORT usPWMMed;		 /* The PWM value (in percent) at TMed. */
	USHORT usPWMHigh;		 /* The PWM value at THigh. */
	USHORT usTMax;			 /* The max temperature */
	UCHAR ucFanControlMode;		  /* Legacy or Fuzzy Fan mode */
	USHORT usFanPWMMax;		  /* Maximum allowed fan power in percent */
	USHORT usFanOutputSensitivity;	  /* Sensitivity of fan reaction to temperature changes */
	USHORT usFanRPMMax;		  /* The default value in RPM */
	ULONG ulMinFanSCLKAcousticLimit;	/* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
	UCHAR ucTargetTemperature;	 /* Advanced fan controller target temperature. */
	UCHAR ucMinimumPWMLimit;	 /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. */
	USHORT usFanGainEdge;		 /* per-sensor controller gains (Fiji additions) */
	USHORT usFanGainHotspot;
	USHORT usFanGainLiquid;
	USHORT usFanGainVrVddc;
	USHORT usFanGainVrMvdd;
	USHORT usFanGainPlx;
	USHORT usFanGainHbm;
	USHORT usReserved;
} ATOM_Fiji_Fan_Table;
287
/* Thermal controller description: controller type, its I2C location,
 * and display-only fan RPM bounds. */
typedef struct _ATOM_Tonga_Thermal_Controller {
	UCHAR ucRevId;
	UCHAR ucType;		   /* one of ATOM_TONGA_PP_THERMALCONTROLLER_* */
	UCHAR ucI2cLine;		/* as interpreted by DAL I2C */
	UCHAR ucI2cAddress;
	UCHAR ucFanParameters;	/* Fan Control Parameters. */
	UCHAR ucFanMinRPM;	 /* Fan Minimum RPM (hundreds) -- for display purposes only. */
	UCHAR ucFanMaxRPM;	 /* Fan Maximum RPM (hundreds) -- for display purposes only. */
	UCHAR ucReserved;
	UCHAR ucFlags;		 /* to be defined */
} ATOM_Tonga_Thermal_Controller;
299
/* One VCE state: indices tying a VCE clock level to SCLK/MCLK levels. */
typedef struct _ATOM_Tonga_VCE_State_Record {
	UCHAR ucVCEClockIndex;	/*index into usVCEDependencyTableOffset of 'ATOM_Tonga_MM_Dependency_Table' type */
	UCHAR ucFlag;		/* 2 bits indicates memory p-states */
	UCHAR ucSCLKIndex;	/*index into ATOM_Tonga_SCLK_Dependency_Table */
	UCHAR ucMCLKIndex;	/*index into ATOM_Tonga_MCLK_Dependency_Table */
} ATOM_Tonga_VCE_State_Record;
306
/* Header + variable-length array of VCE states (entries[1] idiom;
 * real count in ucNumEntries). */
typedef struct _ATOM_Tonga_VCE_State_Table {
	UCHAR ucRevId;
	UCHAR ucNumEntries;
	ATOM_Tonga_VCE_State_Record entries[1];
} ATOM_Tonga_VCE_State_Table;
312
/* PowerTune (TDP/TDC/thermal limit) parameters, Tonga layout.
 * Selected by get_cac_tdp_table() when the subtable ucRevId < 3;
 * revisions >= 3 use the extended ATOM_Fiji_PowerTune_Table instead. */
typedef struct _ATOM_Tonga_PowerTune_Table {
	UCHAR ucRevId;
	USHORT usTDP;
	USHORT usConfigurableTDP;
	USHORT usTDC;
	USHORT usBatteryPowerLimit;
	USHORT usSmallPowerLimit;
	USHORT usLowCACLeakage;
	USHORT usHighCACLeakage;
	USHORT usMaximumPowerDeliveryLimit;
	USHORT usTjMax;
	USHORT usPowerTuneDataSetID;
	USHORT usEDCLimit;
	USHORT usSoftwareShutdownTemp;
	USHORT usClockStretchAmount;
	USHORT usReserve[2];
} ATOM_Tonga_PowerTune_Table;
330
/* PowerTune parameters, Fiji layout (subtable ucRevId >= 3 in
 * get_cac_tdp_table()): Tonga fields plus extra temperature limits
 * and I2C addresses for liquid/VR/PLX sensors. */
typedef struct _ATOM_Fiji_PowerTune_Table {
	UCHAR ucRevId;
	USHORT usTDP;
	USHORT usConfigurableTDP;
	USHORT usTDC;
	USHORT usBatteryPowerLimit;
	USHORT usSmallPowerLimit;
	USHORT usLowCACLeakage;
	USHORT usHighCACLeakage;
	USHORT usMaximumPowerDeliveryLimit;
	USHORT usTjMax;  /* For Fiji, this is also usTemperatureLimitEdge; */
	USHORT usPowerTuneDataSetID;
	USHORT usEDCLimit;
	USHORT usSoftwareShutdownTemp;
	USHORT usClockStretchAmount;
	USHORT usTemperatureLimitHotspot;  /*The following are added for Fiji */
	USHORT usTemperatureLimitLiquid1;
	USHORT usTemperatureLimitLiquid2;
	USHORT usTemperatureLimitVrVddc;
	USHORT usTemperatureLimitVrMvdd;
	USHORT usTemperatureLimitPlx;
	UCHAR  ucLiquid1_I2C_address;  /*Liquid */
	UCHAR  ucLiquid2_I2C_address;
	UCHAR  ucLiquid_I2C_Line;
	UCHAR  ucVr_I2C_address;	/*VR */
	UCHAR  ucVr_I2C_Line;
	UCHAR  ucPlx_I2C_address;  /*PLX */
	UCHAR  ucPlx_I2C_Line;
	USHORT usReserved;
} ATOM_Fiji_PowerTune_Table;
361
/* ucPpmDesign values: APU + APU vs APU + discrete GPU platform designs. */
#define ATOM_PPM_A_A    1
#define ATOM_PPM_A_I    2
/* Platform Power Management parameters (platform/APU/dGPU TDP and TDC
 * budgets); parsed by get_platform_power_management_table(). */
typedef struct _ATOM_Tonga_PPM_Table {
	UCHAR   ucRevId;
	UCHAR   ucPpmDesign;		  /*A+I or A+A */
	USHORT  usCpuCoreNumber;
	ULONG  ulPlatformTDP;
	ULONG  ulSmallACPlatformTDP;
	ULONG  ulPlatformTDC;
	ULONG  ulSmallACPlatformTDC;
	ULONG  ulApuTDP;
	ULONG  ulDGpuTDP;
	ULONG  ulDGpuUlvPower;
	ULONG  ulTjmax;
} ATOM_Tonga_PPM_Table;
377
/* Absolute clock/voltage ceilings; consumed by get_hard_limits(),
 * which currently only reads entries[0]. */
typedef struct _ATOM_Tonga_Hard_Limit_Record {
	ULONG  ulSCLKLimit;
	ULONG  ulMCLKLimit;
	USHORT  usVddcLimit;
	USHORT  usVddciLimit;
	USHORT  usVddgfxLimit;
} ATOM_Tonga_Hard_Limit_Record;
385
/* Header + variable-length array of hard-limit records (entries[1]
 * idiom; real count in ucNumEntries). */
typedef struct _ATOM_Tonga_Hard_Limit_Table {
	UCHAR ucRevId;
	UCHAR ucNumEntries;
	ATOM_Tonga_Hard_Limit_Record entries[1];
} ATOM_Tonga_Hard_Limit_Table;
391
/* GPIO-driven throttling configuration from the VBIOS. */
typedef struct _ATOM_Tonga_GPIO_Table {
	UCHAR  ucRevId;
	UCHAR  ucVRHotTriggeredSclkDpmIndex;		/* If VRHot signal is triggered SCLK will be limited to this DPM level */
	UCHAR  ucReserve[5];
} ATOM_Tonga_GPIO_Table;
397
/* Minimal header common to every subtable: ucRevId alone selects the
 * concrete layout (e.g. get_cac_tdp_table() reads it to pick the
 * Tonga vs Fiji PowerTune structure). */
typedef struct _PPTable_Generic_SubTable_Header {
	UCHAR  ucRevId;
} PPTable_Generic_SubTable_Header;
401
402
403#pragma pack(pop)
404
405
406#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
new file mode 100644
index 000000000000..34f4bef3691f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -0,0 +1,1142 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26
27#include "tonga_processpptables.h"
28#include "ppatomctrl.h"
29#include "atombios.h"
30#include "pp_debug.h"
31#include "hwmgr.h"
32#include "cgs_common.h"
33#include "tonga_pptable.h"
34
35/**
36 * Private Function used during initialization.
37 * @param hwmgr Pointer to the hardware manager.
38 * @param setIt A flag indication if the capability should be set (TRUE) or reset (FALSE).
39 * @param cap Which capability to set/reset.
40 */
41static void set_hw_cap(struct pp_hwmgr *hwmgr, bool setIt, enum phm_platform_caps cap)
42{
43 if (setIt)
44 phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
45 else
46 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
47}
48
49
50/**
51 * Private Function used during initialization.
52 * @param hwmgr Pointer to the hardware manager.
53 * @param powerplay_caps the bit array (from BIOS) of capability bits.
54 * @exception the current implementation always returns 1.
55 */
56static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
57{
58 PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE16____),
59 "ATOM_PP_PLATFORM_CAP_ASPM_L1 is not supported!", continue);
60 PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE64____),
61 "ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY is not supported!", continue);
62 PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE512____),
63 "ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL is not supported!", continue);
64 PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE1024____),
65 "ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 is not supported!", continue);
66 PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE2048____),
67 "ATOM_PP_PLATFORM_CAP_HTLINKCONTROL is not supported!", continue);
68
69 set_hw_cap(
70 hwmgr,
71 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY),
72 PHM_PlatformCaps_PowerPlaySupport
73 );
74
75 set_hw_cap(
76 hwmgr,
77 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
78 PHM_PlatformCaps_BiosPowerSourceControl
79 );
80
81 set_hw_cap(
82 hwmgr,
83 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC),
84 PHM_PlatformCaps_AutomaticDCTransition
85 );
86
87 set_hw_cap(
88 hwmgr,
89 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL),
90 PHM_PlatformCaps_EnableMVDDControl
91 );
92
93 set_hw_cap(
94 hwmgr,
95 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL),
96 PHM_PlatformCaps_ControlVDDCI
97 );
98
99 set_hw_cap(
100 hwmgr,
101 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL),
102 PHM_PlatformCaps_ControlVDDGFX
103 );
104
105 set_hw_cap(
106 hwmgr,
107 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_BACO),
108 PHM_PlatformCaps_BACO
109 );
110
111 set_hw_cap(
112 hwmgr,
113 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND),
114 PHM_PlatformCaps_DisableVoltageIsland
115 );
116
117 set_hw_cap(
118 hwmgr,
119 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL),
120 PHM_PlatformCaps_CombinePCCWithThermalSignal
121 );
122
123 set_hw_cap(
124 hwmgr,
125 0 != (powerplay_caps & ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE),
126 PHM_PlatformCaps_LoadPostProductionFirmware
127 );
128
129 return 0;
130}
131
132/**
133 * Private Function to get the PowerPlay Table Address.
134 */
135const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
136{
137 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
138
139 u16 size;
140 u8 frev, crev;
141 void *table_address;
142
143 table_address = (ATOM_Tonga_POWERPLAYTABLE *)
144 cgs_atom_get_data_table(hwmgr->device, index, &size, &frev, &crev);
145
146 hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
147
148 return table_address;
149}
150
151static int get_vddc_lookup_table(
152 struct pp_hwmgr *hwmgr,
153 phm_ppt_v1_voltage_lookup_table **lookup_table,
154 const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
155 uint32_t max_levels
156 )
157{
158 uint32_t table_size, i;
159 phm_ppt_v1_voltage_lookup_table *table;
160
161 PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
162 "Invalid CAC Leakage PowerPlay Table!", return 1);
163
164 table_size = sizeof(uint32_t) +
165 sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
166
167 table = (phm_ppt_v1_voltage_lookup_table *)
168 kzalloc(table_size, GFP_KERNEL);
169
170 if (NULL == table)
171 return -ENOMEM;
172
173 memset(table, 0x00, table_size);
174
175 table->count = vddc_lookup_pp_tables->ucNumEntries;
176
177 for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
178 table->entries[i].us_calculated = 0;
179 table->entries[i].us_vdd =
180 vddc_lookup_pp_tables->entries[i].usVdd;
181 table->entries[i].us_cac_low =
182 vddc_lookup_pp_tables->entries[i].usCACLow;
183 table->entries[i].us_cac_mid =
184 vddc_lookup_pp_tables->entries[i].usCACMid;
185 table->entries[i].us_cac_high =
186 vddc_lookup_pp_tables->entries[i].usCACHigh;
187 }
188
189 *lookup_table = table;
190
191 return 0;
192}
193
194/**
195 * Private Function used during initialization.
196 * Initialize Platform Power Management Parameter table
197 * @param hwmgr Pointer to the hardware manager.
198 * @param atom_ppm_table Pointer to PPM table in VBIOS
199 */
200static int get_platform_power_management_table(
201 struct pp_hwmgr *hwmgr,
202 ATOM_Tonga_PPM_Table *atom_ppm_table)
203{
204 struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
205 struct phm_ppt_v1_information *pp_table_information =
206 (struct phm_ppt_v1_information *)(hwmgr->pptable);
207
208 if (NULL == ptr)
209 return -ENOMEM;
210
211 ptr->ppm_design
212 = atom_ppm_table->ucPpmDesign;
213 ptr->cpu_core_number
214 = atom_ppm_table->usCpuCoreNumber;
215 ptr->platform_tdp
216 = atom_ppm_table->ulPlatformTDP;
217 ptr->small_ac_platform_tdp
218 = atom_ppm_table->ulSmallACPlatformTDP;
219 ptr->platform_tdc
220 = atom_ppm_table->ulPlatformTDC;
221 ptr->small_ac_platform_tdc
222 = atom_ppm_table->ulSmallACPlatformTDC;
223 ptr->apu_tdp
224 = atom_ppm_table->ulApuTDP;
225 ptr->dgpu_tdp
226 = atom_ppm_table->ulDGpuTDP;
227 ptr->dgpu_ulv_power
228 = atom_ppm_table->ulDGpuUlvPower;
229 ptr->tj_max
230 = atom_ppm_table->ulTjmax;
231
232 pp_table_information->ppm_parameter_table = ptr;
233
234 return 0;
235}
236
237/**
238 * Private Function used during initialization.
239 * Initialize TDP limits for DPM2
240 * @param hwmgr Pointer to the hardware manager.
241 * @param powerplay_table Pointer to the PowerPlay Table.
242 */
243static int init_dpm_2_parameters(
244 struct pp_hwmgr *hwmgr,
245 const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
246 )
247{
248 int result = 0;
249 struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
250 ATOM_Tonga_PPM_Table *atom_ppm_table;
251 uint32_t disable_ppm = 0;
252 uint32_t disable_power_control = 0;
253
254 pp_table_information->us_ulv_voltage_offset =
255 le16_to_cpu(powerplay_table->usUlvVoltageOffset);
256
257 pp_table_information->ppm_parameter_table = NULL;
258 pp_table_information->vddc_lookup_table = NULL;
259 pp_table_information->vddgfx_lookup_table = NULL;
260 /* TDP limits */
261 hwmgr->platform_descriptor.TDPODLimit =
262 le16_to_cpu(powerplay_table->usPowerControlLimit);
263 hwmgr->platform_descriptor.TDPAdjustment = 0;
264 hwmgr->platform_descriptor.VidAdjustment = 0;
265 hwmgr->platform_descriptor.VidAdjustmentPolarity = 0;
266 hwmgr->platform_descriptor.VidMinLimit = 0;
267 hwmgr->platform_descriptor.VidMaxLimit = 1500000;
268 hwmgr->platform_descriptor.VidStep = 6250;
269
270 disable_power_control = 0;
271 if (0 == disable_power_control) {
272 /* enable TDP overdrive (PowerControl) feature as well if supported */
273 if (hwmgr->platform_descriptor.TDPODLimit != 0)
274 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
275 PHM_PlatformCaps_PowerControl);
276 }
277
278 if (0 != powerplay_table->usVddcLookupTableOffset) {
279 const ATOM_Tonga_Voltage_Lookup_Table *pVddcCACTable =
280 (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
281 le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
282
283 result = get_vddc_lookup_table(hwmgr,
284 &pp_table_information->vddc_lookup_table, pVddcCACTable, 16);
285 }
286
287 if (0 != powerplay_table->usVddgfxLookupTableOffset) {
288 const ATOM_Tonga_Voltage_Lookup_Table *pVddgfxCACTable =
289 (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
290 le16_to_cpu(powerplay_table->usVddgfxLookupTableOffset));
291
292 result = get_vddc_lookup_table(hwmgr,
293 &pp_table_information->vddgfx_lookup_table, pVddgfxCACTable, 16);
294 }
295
296 disable_ppm = 0;
297 if (0 == disable_ppm) {
298 atom_ppm_table = (ATOM_Tonga_PPM_Table *)
299 (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
300
301 if (0 != powerplay_table->usPPMTableOffset) {
302 if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
303 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
304 PHM_PlatformCaps_EnablePlatformPowerManagement);
305 }
306 }
307 }
308
309 return result;
310}
311
312static int get_valid_clk(
313 struct pp_hwmgr *hwmgr,
314 struct phm_clock_array **clk_table,
315 const phm_ppt_v1_clock_voltage_dependency_table * clk_volt_pp_table
316 )
317{
318 uint32_t table_size, i;
319 struct phm_clock_array *table;
320
321 PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
322 "Invalid PowerPlay Table!", return -1);
323
324 table_size = sizeof(uint32_t) +
325 sizeof(uint32_t) * clk_volt_pp_table->count;
326
327 table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL);
328
329 if (NULL == table)
330 return -ENOMEM;
331
332 memset(table, 0x00, table_size);
333
334 table->count = (uint32_t)clk_volt_pp_table->count;
335
336 for (i = 0; i < table->count; i++)
337 table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
338
339 *clk_table = table;
340
341 return 0;
342}
343
344static int get_hard_limits(
345 struct pp_hwmgr *hwmgr,
346 struct phm_clock_and_voltage_limits *limits,
347 const ATOM_Tonga_Hard_Limit_Table * limitable
348 )
349{
350 PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
351
352 /* currently we always take entries[0] parameters */
353 limits->sclk = (uint32_t)limitable->entries[0].ulSCLKLimit;
354 limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit;
355 limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit;
356 limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit;
357 limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit;
358
359 return 0;
360}
361
362static int get_mclk_voltage_dependency_table(
363 struct pp_hwmgr *hwmgr,
364 phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
365 const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
366 )
367{
368 uint32_t table_size, i;
369 phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
370
371 PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
372 "Invalid PowerPlay Table!", return -1);
373
374 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
375 * mclk_dep_table->ucNumEntries;
376
377 mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
378 kzalloc(table_size, GFP_KERNEL);
379
380 if (NULL == mclk_table)
381 return -ENOMEM;
382
383 memset(mclk_table, 0x00, table_size);
384
385 mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
386
387 for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
388 mclk_table->entries[i].vddInd =
389 mclk_dep_table->entries[i].ucVddcInd;
390 mclk_table->entries[i].vdd_offset =
391 mclk_dep_table->entries[i].usVddgfxOffset;
392 mclk_table->entries[i].vddci =
393 mclk_dep_table->entries[i].usVddci;
394 mclk_table->entries[i].mvdd =
395 mclk_dep_table->entries[i].usMvdd;
396 mclk_table->entries[i].clk =
397 mclk_dep_table->entries[i].ulMclk;
398 }
399
400 *pp_tonga_mclk_dep_table = mclk_table;
401
402 return 0;
403}
404
405static int get_sclk_voltage_dependency_table(
406 struct pp_hwmgr *hwmgr,
407 phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
408 const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
409 )
410{
411 uint32_t table_size, i;
412 phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
413
414 PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
415 "Invalid PowerPlay Table!", return -1);
416
417 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
418 * sclk_dep_table->ucNumEntries;
419
420 sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
421 kzalloc(table_size, GFP_KERNEL);
422
423 if (NULL == sclk_table)
424 return -ENOMEM;
425
426 memset(sclk_table, 0x00, table_size);
427
428 sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
429
430 for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
431 sclk_table->entries[i].vddInd =
432 sclk_dep_table->entries[i].ucVddInd;
433 sclk_table->entries[i].vdd_offset =
434 sclk_dep_table->entries[i].usVddcOffset;
435 sclk_table->entries[i].clk =
436 sclk_dep_table->entries[i].ulSclk;
437 sclk_table->entries[i].cks_enable =
438 (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
439 sclk_table->entries[i].cks_voffset =
440 (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
441 }
442
443 *pp_tonga_sclk_dep_table = sclk_table;
444
445 return 0;
446}
447
448static int get_pcie_table(
449 struct pp_hwmgr *hwmgr,
450 phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
451 const ATOM_Tonga_PCIE_Table * atom_pcie_table
452 )
453{
454 uint32_t table_size, i, pcie_count;
455 phm_ppt_v1_pcie_table *pcie_table;
456 struct phm_ppt_v1_information *pp_table_information =
457 (struct phm_ppt_v1_information *)(hwmgr->pptable);
458 PP_ASSERT_WITH_CODE((0 != atom_pcie_table->ucNumEntries),
459 "Invalid PowerPlay Table!", return -1);
460
461 table_size = sizeof(uint32_t) +
462 sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
463
464 pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
465
466 if (NULL == pcie_table)
467 return -ENOMEM;
468
469 memset(pcie_table, 0x00, table_size);
470
471 /*
472 * Make sure the number of pcie entries are less than or equal to sclk dpm levels.
473 * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
474 */
475 pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
476 if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
477 pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
478 else
479 printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
480 Disregarding the excess entries... \n");
481
482 pcie_table->count = pcie_count;
483
484 for (i = 0; i < pcie_count; i++) {
485 pcie_table->entries[i].gen_speed =
486 atom_pcie_table->entries[i].ucPCIEGenSpeed;
487 pcie_table->entries[i].lane_width =
488 atom_pcie_table->entries[i].usPCIELaneWidth;
489 }
490
491 *pp_tonga_pcie_table = pcie_table;
492
493 return 0;
494}
495
496static int get_cac_tdp_table(
497 struct pp_hwmgr *hwmgr,
498 struct phm_cac_tdp_table **cac_tdp_table,
499 const PPTable_Generic_SubTable_Header * table
500 )
501{
502 uint32_t table_size;
503 struct phm_cac_tdp_table *tdp_table;
504
505 table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
506 tdp_table = kzalloc(table_size, GFP_KERNEL);
507
508 if (NULL == tdp_table)
509 return -ENOMEM;
510
511 memset(tdp_table, 0x00, table_size);
512
513 hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
514
515 if (NULL == hwmgr->dyn_state.cac_dtp_table)
516 return -ENOMEM;
517
518 memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
519
520 if (table->ucRevId < 3) {
521 const ATOM_Tonga_PowerTune_Table *tonga_table =
522 (ATOM_Tonga_PowerTune_Table *)table;
523 tdp_table->usTDP = tonga_table->usTDP;
524 tdp_table->usConfigurableTDP =
525 tonga_table->usConfigurableTDP;
526 tdp_table->usTDC = tonga_table->usTDC;
527 tdp_table->usBatteryPowerLimit =
528 tonga_table->usBatteryPowerLimit;
529 tdp_table->usSmallPowerLimit =
530 tonga_table->usSmallPowerLimit;
531 tdp_table->usLowCACLeakage =
532 tonga_table->usLowCACLeakage;
533 tdp_table->usHighCACLeakage =
534 tonga_table->usHighCACLeakage;
535 tdp_table->usMaximumPowerDeliveryLimit =
536 tonga_table->usMaximumPowerDeliveryLimit;
537 tdp_table->usDefaultTargetOperatingTemp =
538 tonga_table->usTjMax;
539 tdp_table->usTargetOperatingTemp =
540 tonga_table->usTjMax; /*Set the initial temp to the same as default */
541 tdp_table->usPowerTuneDataSetID =
542 tonga_table->usPowerTuneDataSetID;
543 tdp_table->usSoftwareShutdownTemp =
544 tonga_table->usSoftwareShutdownTemp;
545 tdp_table->usClockStretchAmount =
546 tonga_table->usClockStretchAmount;
547 } else { /* Fiji and newer */
548 const ATOM_Fiji_PowerTune_Table *fijitable =
549 (ATOM_Fiji_PowerTune_Table *)table;
550 tdp_table->usTDP = fijitable->usTDP;
551 tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP;
552 tdp_table->usTDC = fijitable->usTDC;
553 tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit;
554 tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit;
555 tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage;
556 tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage;
557 tdp_table->usMaximumPowerDeliveryLimit =
558 fijitable->usMaximumPowerDeliveryLimit;
559 tdp_table->usDefaultTargetOperatingTemp =
560 fijitable->usTjMax;
561 tdp_table->usTargetOperatingTemp =
562 fijitable->usTjMax; /*Set the initial temp to the same as default */
563 tdp_table->usPowerTuneDataSetID =
564 fijitable->usPowerTuneDataSetID;
565 tdp_table->usSoftwareShutdownTemp =
566 fijitable->usSoftwareShutdownTemp;
567 tdp_table->usClockStretchAmount =
568 fijitable->usClockStretchAmount;
569 tdp_table->usTemperatureLimitHotspot =
570 fijitable->usTemperatureLimitHotspot;
571 tdp_table->usTemperatureLimitLiquid1 =
572 fijitable->usTemperatureLimitLiquid1;
573 tdp_table->usTemperatureLimitLiquid2 =
574 fijitable->usTemperatureLimitLiquid2;
575 tdp_table->usTemperatureLimitVrVddc =
576 fijitable->usTemperatureLimitVrVddc;
577 tdp_table->usTemperatureLimitVrMvdd =
578 fijitable->usTemperatureLimitVrMvdd;
579 tdp_table->usTemperatureLimitPlx =
580 fijitable->usTemperatureLimitPlx;
581 tdp_table->ucLiquid1_I2C_address =
582 fijitable->ucLiquid1_I2C_address;
583 tdp_table->ucLiquid2_I2C_address =
584 fijitable->ucLiquid2_I2C_address;
585 tdp_table->ucLiquid_I2C_Line =
586 fijitable->ucLiquid_I2C_Line;
587 tdp_table->ucVr_I2C_address = fijitable->ucVr_I2C_address;
588 tdp_table->ucVr_I2C_Line = fijitable->ucVr_I2C_Line;
589 tdp_table->ucPlx_I2C_address = fijitable->ucPlx_I2C_address;
590 tdp_table->ucPlx_I2C_Line = fijitable->ucPlx_I2C_Line;
591 }
592
593 *cac_tdp_table = tdp_table;
594
595 return 0;
596}
597
598static int get_mm_clock_voltage_table(
599 struct pp_hwmgr *hwmgr,
600 phm_ppt_v1_mm_clock_voltage_dependency_table **tonga_mm_table,
601 const ATOM_Tonga_MM_Dependency_Table * mm_dependency_table
602 )
603{
604 uint32_t table_size, i;
605 const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
606 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
607
608 PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
609 "Invalid PowerPlay Table!", return -1);
610 table_size = sizeof(uint32_t) +
611 sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
612 * mm_dependency_table->ucNumEntries;
613 mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *)
614 kzalloc(table_size, GFP_KERNEL);
615
616 if (NULL == mm_table)
617 return -ENOMEM;
618
619 memset(mm_table, 0x00, table_size);
620
621 mm_table->count = mm_dependency_table->ucNumEntries;
622
623 for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
624 mm_dependency_record = &mm_dependency_table->entries[i];
625 mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
626 mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
627 mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
628 mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
629 mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
630 mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
631 mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
632 }
633
634 *tonga_mm_table = mm_table;
635
636 return 0;
637}
638
/**
 * Private Function used during initialization.
 * Initialize clock voltage dependency
 * @param hwmgr Pointer to the hardware manager.
 * @param powerplay_table Pointer to the PowerPlay Table.
 *
 * Resolves each subtable via its little-endian byte offset from the start
 * of the PowerPlay table, then parses whichever subtables are present
 * (offset != 0).  Each parse only runs if all previous ones succeeded
 * (result == 0); the first failure is returned.  Note the subtable
 * pointers are computed unconditionally before the offset checks — an
 * offset of 0 just yields a pointer that is never dereferenced.
 */
static int init_clock_voltage_dependency(
		struct pp_hwmgr *hwmgr,
		const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
		)
{
	int result = 0;
	struct phm_ppt_v1_information *pp_table_information =
		(struct phm_ppt_v1_information *)(hwmgr->pptable);

	const ATOM_Tonga_MM_Dependency_Table *mm_dependency_table =
		(const ATOM_Tonga_MM_Dependency_Table *)(((unsigned long) powerplay_table) +
		le16_to_cpu(powerplay_table->usMMDependencyTableOffset));
	const PPTable_Generic_SubTable_Header *pPowerTuneTable =
		(const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
		le16_to_cpu(powerplay_table->usPowerTuneTableOffset));
	const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
		(const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
		le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
	const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
		(const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
		le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
	const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
		(const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
		le16_to_cpu(powerplay_table->usHardLimitTableOffset));
	const ATOM_Tonga_PCIE_Table *pcie_table =
		(const ATOM_Tonga_PCIE_Table *)(((unsigned long) powerplay_table) +
		le16_to_cpu(powerplay_table->usPCIETableOffset));

	pp_table_information->vdd_dep_on_sclk = NULL;
	pp_table_information->vdd_dep_on_mclk = NULL;
	pp_table_information->mm_dep_table = NULL;
	pp_table_information->pcie_table = NULL;

	if (powerplay_table->usMMDependencyTableOffset != 0)
		result = get_mm_clock_voltage_table(hwmgr,
		&pp_table_information->mm_dep_table, mm_dependency_table);

	if (result == 0 && powerplay_table->usPowerTuneTableOffset != 0)
		result = get_cac_tdp_table(hwmgr,
		&pp_table_information->cac_dtp_table, pPowerTuneTable);

	/* SCLK must be parsed before PCIe: get_pcie_table() reads
	 * vdd_dep_on_sclk->count to clamp the PCIe entry count. */
	if (result == 0 && powerplay_table->usSclkDependencyTableOffset != 0)
		result = get_sclk_voltage_dependency_table(hwmgr,
		&pp_table_information->vdd_dep_on_sclk, sclk_dep_table);

	if (result == 0 && powerplay_table->usMclkDependencyTableOffset != 0)
		result = get_mclk_voltage_dependency_table(hwmgr,
		&pp_table_information->vdd_dep_on_mclk, mclk_dep_table);

	if (result == 0 && powerplay_table->usPCIETableOffset != 0)
		result = get_pcie_table(hwmgr,
		&pp_table_information->pcie_table, pcie_table);

	if (result == 0 && powerplay_table->usHardLimitTableOffset != 0)
		result = get_hard_limits(hwmgr,
		&pp_table_information->max_clock_voltage_on_dc, pHardLimits);

	/* Mirror the DC (battery) clock/voltage maxima into the legacy
	 * dyn_state copy used elsewhere. */
	hwmgr->dyn_state.max_clock_voltage_on_dc.sclk =
		pp_table_information->max_clock_voltage_on_dc.sclk;
	hwmgr->dyn_state.max_clock_voltage_on_dc.mclk =
		pp_table_information->max_clock_voltage_on_dc.mclk;
	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
		pp_table_information->max_clock_voltage_on_dc.vddc;
	hwmgr->dyn_state.max_clock_voltage_on_dc.vddci =
		pp_table_information->max_clock_voltage_on_dc.vddci;

	if (result == 0 && (NULL != pp_table_information->vdd_dep_on_mclk)
		&& (0 != pp_table_information->vdd_dep_on_mclk->count))
		result = get_valid_clk(hwmgr, &pp_table_information->valid_mclk_values,
		pp_table_information->vdd_dep_on_mclk);

	if (result == 0 && (NULL != pp_table_information->vdd_dep_on_sclk)
		&& (0 != pp_table_information->vdd_dep_on_sclk->count))
		result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
		pp_table_information->vdd_dep_on_sclk);

	return result;
}
723
724/** Retrieves the (signed) Overdrive limits from VBIOS.
725 * The max engine clock, memory clock and max temperature come from the firmware info table.
726 *
727 * The information is placed into the platform descriptor.
728 *
729 * @param hwmgr source of the VBIOS table and owner of the platform descriptor to be updated.
730 * @param powerplay_table the address of the PowerPlay table.
731 *
732 * @return 1 as long as the firmware info table was present and of a supported version.
733 */
734static int init_over_drive_limits(
735 struct pp_hwmgr *hwmgr,
736 const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
737{
738 hwmgr->platform_descriptor.overdriveLimit.engineClock =
739 le16_to_cpu(powerplay_table->ulMaxODEngineClock);
740 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
741 le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
742
743 hwmgr->platform_descriptor.minOverdriveVDDC = 0;
744 hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
745 hwmgr->platform_descriptor.overdriveVDDCStep = 0;
746
747 if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 \
748 && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) {
749 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
750 PHM_PlatformCaps_ACOverdriveSupport);
751 }
752
753 return 0;
754}
755
756/**
757 * Private Function used during initialization.
758 * Inspect the PowerPlay table for obvious signs of corruption.
759 * @param hwmgr Pointer to the hardware manager.
760 * @param powerplay_table Pointer to the PowerPlay Table.
761 * @exception This implementation always returns 1.
762 */
763static int init_thermal_controller(
764 struct pp_hwmgr *hwmgr,
765 const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
766 )
767{
768 const PPTable_Generic_SubTable_Header *fan_table;
769 ATOM_Tonga_Thermal_Controller *thermal_controller;
770
771 thermal_controller = (ATOM_Tonga_Thermal_Controller *)
772 (((unsigned long)powerplay_table) +
773 le16_to_cpu(powerplay_table->usThermalControllerOffset));
774 PP_ASSERT_WITH_CODE((0 != powerplay_table->usThermalControllerOffset),
775 "Thermal controller table not set!", return -1);
776
777 hwmgr->thermal_controller.ucType = thermal_controller->ucType;
778 hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
779 hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress;
780
781 hwmgr->thermal_controller.fanInfo.bNoFan =
782 (0 != (thermal_controller->ucFanParameters & ATOM_TONGA_PP_FANPARAMETERS_NOFAN));
783
784 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
785 thermal_controller->ucFanParameters &
786 ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
787
788 hwmgr->thermal_controller.fanInfo.ulMinRPM
789 = thermal_controller->ucFanMinRPM * 100UL;
790 hwmgr->thermal_controller.fanInfo.ulMaxRPM
791 = thermal_controller->ucFanMaxRPM * 100UL;
792
793 set_hw_cap(
794 hwmgr,
795 ATOM_TONGA_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
796 PHM_PlatformCaps_ThermalController
797 );
798
799 if (0 == powerplay_table->usFanTableOffset)
800 return 0;
801
802 fan_table = (const PPTable_Generic_SubTable_Header *)
803 (((unsigned long)powerplay_table) +
804 le16_to_cpu(powerplay_table->usFanTableOffset));
805
806 PP_ASSERT_WITH_CODE((0 != powerplay_table->usFanTableOffset),
807 "Fan table not set!", return -1);
808 PP_ASSERT_WITH_CODE((0 < fan_table->ucRevId),
809 "Unsupported fan table format!", return -1);
810
811 hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
812 = 100000;
813 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
814 PHM_PlatformCaps_MicrocodeFanControl);
815
816 if (fan_table->ucRevId < 8) {
817 const ATOM_Tonga_Fan_Table *tonga_fan_table =
818 (ATOM_Tonga_Fan_Table *)fan_table;
819 hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
820 = tonga_fan_table->ucTHyst;
821 hwmgr->thermal_controller.advanceFanControlParameters.usTMin
822 = tonga_fan_table->usTMin;
823 hwmgr->thermal_controller.advanceFanControlParameters.usTMed
824 = tonga_fan_table->usTMed;
825 hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
826 = tonga_fan_table->usTHigh;
827 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
828 = tonga_fan_table->usPWMMin;
829 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
830 = tonga_fan_table->usPWMMed;
831 hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
832 = tonga_fan_table->usPWMHigh;
833 hwmgr->thermal_controller.advanceFanControlParameters.usTMax
834 = 10900; /* hard coded */
835 hwmgr->thermal_controller.advanceFanControlParameters.usTMax
836 = tonga_fan_table->usTMax;
837 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
838 = tonga_fan_table->ucFanControlMode;
839 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
840 = tonga_fan_table->usFanPWMMax;
841 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
842 = 4836;
843 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
844 = tonga_fan_table->usFanOutputSensitivity;
845 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
846 = tonga_fan_table->usFanRPMMax;
847 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
848 = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */
849 hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
850 = tonga_fan_table->ucTargetTemperature;
851 hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
852 = tonga_fan_table->ucMinimumPWMLimit;
853 } else {
854 const ATOM_Fiji_Fan_Table *fiji_fan_table =
855 (ATOM_Fiji_Fan_Table *)fan_table;
856 hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
857 = fiji_fan_table->ucTHyst;
858 hwmgr->thermal_controller.advanceFanControlParameters.usTMin
859 = fiji_fan_table->usTMin;
860 hwmgr->thermal_controller.advanceFanControlParameters.usTMed
861 = fiji_fan_table->usTMed;
862 hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
863 = fiji_fan_table->usTHigh;
864 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
865 = fiji_fan_table->usPWMMin;
866 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
867 = fiji_fan_table->usPWMMed;
868 hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
869 = fiji_fan_table->usPWMHigh;
870 hwmgr->thermal_controller.advanceFanControlParameters.usTMax
871 = fiji_fan_table->usTMax;
872 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
873 = fiji_fan_table->ucFanControlMode;
874 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
875 = fiji_fan_table->usFanPWMMax;
876 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
877 = 4836;
878 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
879 = fiji_fan_table->usFanOutputSensitivity;
880 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
881 = fiji_fan_table->usFanRPMMax;
882 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
883 = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */
884 hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
885 = fiji_fan_table->ucTargetTemperature;
886 hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
887 = fiji_fan_table->ucMinimumPWMLimit;
888
889 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge
890 = fiji_fan_table->usFanGainEdge;
891 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot
892 = fiji_fan_table->usFanGainHotspot;
893 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid
894 = fiji_fan_table->usFanGainLiquid;
895 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc
896 = fiji_fan_table->usFanGainVrVddc;
897 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd
898 = fiji_fan_table->usFanGainVrMvdd;
899 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx
900 = fiji_fan_table->usFanGainPlx;
901 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm
902 = fiji_fan_table->usFanGainHbm;
903 }
904
905 return 0;
906}
907
908/**
909 * Private Function used during initialization.
910 * Inspect the PowerPlay table for obvious signs of corruption.
911 * @param hwmgr Pointer to the hardware manager.
912 * @param powerplay_table Pointer to the PowerPlay Table.
913 * @exception 2 if the powerplay table is incorrect.
914 */
915static int check_powerplay_tables(
916 struct pp_hwmgr *hwmgr,
917 const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
918 )
919{
920 const ATOM_Tonga_State_Array *state_arrays;
921
922 state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)powerplay_table) +
923 le16_to_cpu(powerplay_table->usStateArrayOffset));
924
925 PP_ASSERT_WITH_CODE((ATOM_Tonga_TABLE_REVISION_TONGA <=
926 powerplay_table->sHeader.ucTableFormatRevision),
927 "Unsupported PPTable format!", return -1);
928 PP_ASSERT_WITH_CODE((0 != powerplay_table->usStateArrayOffset),
929 "State table is not set!", return -1);
930 PP_ASSERT_WITH_CODE((0 < powerplay_table->sHeader.usStructureSize),
931 "Invalid PowerPlay Table!", return -1);
932 PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
933 "Invalid PowerPlay Table!", return -1);
934
935 return 0;
936}
937
938int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
939{
940 int result = 0;
941 const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
942
943 hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v1_information), GFP_KERNEL);
944
945 PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
946 "Failed to allocate hwmgr->pptable!", return -ENOMEM);
947
948 memset(hwmgr->pptable, 0x00, sizeof(struct phm_ppt_v1_information));
949
950 powerplay_table = get_powerplay_table(hwmgr);
951
952 PP_ASSERT_WITH_CODE((NULL != powerplay_table),
953 "Missing PowerPlay Table!", return -1);
954
955 result = check_powerplay_tables(hwmgr, powerplay_table);
956
957 PP_ASSERT_WITH_CODE((result == 0),
958 "check_powerplay_tables failed", return result);
959
960 result = set_platform_caps(hwmgr,
961 le32_to_cpu(powerplay_table->ulPlatformCaps));
962
963 PP_ASSERT_WITH_CODE((result == 0),
964 "set_platform_caps failed", return result);
965
966 result = init_thermal_controller(hwmgr, powerplay_table);
967
968 PP_ASSERT_WITH_CODE((result == 0),
969 "init_thermal_controller failed", return result);
970
971 result = init_over_drive_limits(hwmgr, powerplay_table);
972
973 PP_ASSERT_WITH_CODE((result == 0),
974 "init_over_drive_limits failed", return result);
975
976 result = init_clock_voltage_dependency(hwmgr, powerplay_table);
977
978 PP_ASSERT_WITH_CODE((result == 0),
979 "init_clock_voltage_dependency failed", return result);
980
981 result = init_dpm_2_parameters(hwmgr, powerplay_table);
982
983 PP_ASSERT_WITH_CODE((result == 0),
984 "init_dpm_2_parameters failed", return result);
985
986 return result;
987}
988
989int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
990{
991 int result = 0;
992 struct phm_ppt_v1_information *pp_table_information =
993 (struct phm_ppt_v1_information *)(hwmgr->pptable);
994
995 if (NULL != hwmgr->soft_pp_table) {
996 kfree(hwmgr->soft_pp_table);
997 hwmgr->soft_pp_table = NULL;
998 }
999
1000 if (NULL != pp_table_information->vdd_dep_on_sclk)
1001 pp_table_information->vdd_dep_on_sclk = NULL;
1002
1003 if (NULL != pp_table_information->vdd_dep_on_mclk)
1004 pp_table_information->vdd_dep_on_mclk = NULL;
1005
1006 if (NULL != pp_table_information->valid_mclk_values)
1007 pp_table_information->valid_mclk_values = NULL;
1008
1009 if (NULL != pp_table_information->valid_sclk_values)
1010 pp_table_information->valid_sclk_values = NULL;
1011
1012 if (NULL != pp_table_information->vddc_lookup_table)
1013 pp_table_information->vddc_lookup_table = NULL;
1014
1015 if (NULL != pp_table_information->vddgfx_lookup_table)
1016 pp_table_information->vddgfx_lookup_table = NULL;
1017
1018 if (NULL != pp_table_information->mm_dep_table)
1019 pp_table_information->mm_dep_table = NULL;
1020
1021 if (NULL != pp_table_information->cac_dtp_table)
1022 pp_table_information->cac_dtp_table = NULL;
1023
1024 if (NULL != hwmgr->dyn_state.cac_dtp_table)
1025 hwmgr->dyn_state.cac_dtp_table = NULL;
1026
1027 if (NULL != pp_table_information->ppm_parameter_table)
1028 pp_table_information->ppm_parameter_table = NULL;
1029
1030 if (NULL != pp_table_information->pcie_table)
1031 pp_table_information->pcie_table = NULL;
1032
1033 if (NULL != hwmgr->pptable) {
1034 kfree(hwmgr->pptable);
1035 hwmgr->pptable = NULL;
1036 }
1037
1038 return result;
1039}
1040
/* Table-parsing entry points exported to the powerplay hwmgr framework. */
const struct pp_table_func tonga_pptable_funcs = {
	.pptable_init = tonga_pp_tables_initialize,
	.pptable_fini = tonga_pp_tables_uninitialize,
};
1045
1046int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
1047{
1048 const ATOM_Tonga_State_Array * state_arrays;
1049 const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
1050
1051 PP_ASSERT_WITH_CODE((NULL != pp_table),
1052 "Missing PowerPlay Table!", return -1);
1053 PP_ASSERT_WITH_CODE((pp_table->sHeader.ucTableFormatRevision >=
1054 ATOM_Tonga_TABLE_REVISION_TONGA),
1055 "Incorrect PowerPlay table revision!", return -1);
1056
1057 state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
1058 le16_to_cpu(pp_table->usStateArrayOffset));
1059
1060 return (uint32_t)(state_arrays->ucNumEntries);
1061}
1062
1063/**
1064* Private function to convert flags stored in the BIOS to software flags in PowerPlay.
1065*/
1066static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
1067 uint16_t classification, uint16_t classification2)
1068{
1069 uint32_t result = 0;
1070
1071 if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
1072 result |= PP_StateClassificationFlag_Boot;
1073
1074 if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1075 result |= PP_StateClassificationFlag_Thermal;
1076
1077 if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
1078 result |= PP_StateClassificationFlag_LimitedPowerSource;
1079
1080 if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
1081 result |= PP_StateClassificationFlag_Rest;
1082
1083 if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
1084 result |= PP_StateClassificationFlag_Forced;
1085
1086 if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
1087 result |= PP_StateClassificationFlag_ACPI;
1088
1089 if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
1090 result |= PP_StateClassificationFlag_LimitedPowerSource_2;
1091
1092 return result;
1093}
1094
1095/**
1096* Create a Power State out of an entry in the PowerPlay table.
1097* This function is called by the hardware back-end.
1098* @param hwmgr Pointer to the hardware manager.
1099* @param entry_index The index of the entry to be extracted from the table.
1100* @param power_state The address of the PowerState instance being created.
1101* @return -1 if the entry cannot be retrieved.
1102*/
1103int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
1104 uint32_t entry_index, struct pp_power_state *power_state,
1105 int (*call_back_func)(struct pp_hwmgr *, void *,
1106 struct pp_power_state *, void *, uint32_t))
1107{
1108 int result = 0;
1109 const ATOM_Tonga_State_Array * state_arrays;
1110 const ATOM_Tonga_State *state_entry;
1111 const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
1112
1113 PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;);
1114 power_state->classification.bios_index = entry_index;
1115
1116 if (pp_table->sHeader.ucTableFormatRevision >=
1117 ATOM_Tonga_TABLE_REVISION_TONGA) {
1118 state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
1119 le16_to_cpu(pp_table->usStateArrayOffset));
1120
1121 PP_ASSERT_WITH_CODE((0 < pp_table->usStateArrayOffset),
1122 "Invalid PowerPlay Table State Array Offset.", return -1);
1123 PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
1124 "Invalid PowerPlay Table State Array.", return -1);
1125 PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries),
1126 "Invalid PowerPlay Table State Array Entry.", return -1);
1127
1128 state_entry = &(state_arrays->states[entry_index]);
1129
1130 result = call_back_func(hwmgr, (void *)state_entry, power_state,
1131 (void *)pp_table,
1132 make_classification_flags(hwmgr,
1133 le16_to_cpu(state_entry->usClassification),
1134 le16_to_cpu(state_entry->usClassification2)));
1135 }
1136
1137 if (!result && (power_state->classification.flags &
1138 PP_StateClassificationFlag_Boot))
1139 result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
1140
1141 return result;
1142}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
new file mode 100644
index 000000000000..d24b8887f466
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef TONGA_PROCESSPPTABLES_H
24#define TONGA_PROCESSPPTABLES_H
25
26#include "hwmgr.h"
27
28extern const struct pp_table_func tonga_pptable_funcs;
29extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
30extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
31 struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
32 struct pp_power_state *, void *, uint32_t));
33
34#endif
35
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
new file mode 100644
index 000000000000..a188174747c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
@@ -0,0 +1,590 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <asm/div64.h>
24#include "tonga_thermal.h"
25#include "tonga_hwmgr.h"
26#include "tonga_smumgr.h"
27#include "tonga_ppsmc.h"
28#include "smu/smu_7_1_2_d.h"
29#include "smu/smu_7_1_2_sh_mask.h"
30
31/**
32* Get Fan Speed Control Parameters.
33* @param hwmgr the address of the powerplay hardware manager.
34* @param pSpeed is the address of the structure where the result is to be placed.
35* @exception Always succeeds except if we cannot zero out the output structure.
36*/
37int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
38{
39
40 if (hwmgr->thermal_controller.fanInfo.bNoFan)
41 return 0;
42
43 fan_speed_info->supports_percent_read = true;
44 fan_speed_info->supports_percent_write = true;
45 fan_speed_info->min_percent = 0;
46 fan_speed_info->max_percent = 100;
47
48 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
49 fan_speed_info->supports_rpm_read = true;
50 fan_speed_info->supports_rpm_write = true;
51 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
52 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
53 } else {
54 fan_speed_info->min_rpm = 0;
55 fan_speed_info->max_rpm = 0;
56 }
57
58 return 0;
59}
60
61/**
62* Get Fan Speed in percent.
63* @param hwmgr the address of the powerplay hardware manager.
64* @param pSpeed is the address of the structure where the result is to be placed.
65* @exception Fails is the 100% setting appears to be 0.
66*/
67int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
68{
69 uint32_t duty100;
70 uint32_t duty;
71 uint64_t tmp64;
72
73 if (hwmgr->thermal_controller.fanInfo.bNoFan)
74 return 0;
75
76 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
77 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
78
79 if (0 == duty100)
80 return -EINVAL;
81
82
83 tmp64 = (uint64_t)duty * 100;
84 do_div(tmp64, duty100);
85 *speed = (uint32_t)tmp64;
86
87 if (*speed > 100)
88 *speed = 100;
89
90 return 0;
91}
92
93/**
94* Get Fan Speed in RPM.
95* @param hwmgr the address of the powerplay hardware manager.
96* @param speed is the address of the structure where the result is to be placed.
97* @exception Returns not supported if no fan is found or if pulses per revolution are not set
98*/
99int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
100{
101 return 0;
102}
103
104/**
105* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
106* @param hwmgr the address of the powerplay hardware manager.
107* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
108* @exception Should always succeed.
109*/
110int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
111{
112
113 if (hwmgr->fan_ctrl_is_in_default_mode) {
114 hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
115 hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
116 hwmgr->fan_ctrl_is_in_default_mode = false;
117 }
118
119 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
120 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
121
122 return 0;
123}
124
125/**
126* Reset Fan Speed Control to default mode.
127* @param hwmgr the address of the powerplay hardware manager.
128* @exception Should always succeed.
129*/
130int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
131{
132 if (!hwmgr->fan_ctrl_is_in_default_mode) {
133 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
134 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
135 hwmgr->fan_ctrl_is_in_default_mode = true;
136 }
137
138 return 0;
139}
140
141int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
142{
143 int result;
144
145 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
146 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
147 result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
148/*
149 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
150 hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
151 else
152 hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
153*/
154 } else {
155 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
156 result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
157 }
158/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
159 if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
160 result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
161 hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
162*/
163 return result;
164}
165
166
167int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
168{
169 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
170}
171
172/**
173* Set Fan Speed in percent.
174* @param hwmgr the address of the powerplay hardware manager.
175* @param speed is the percentage value (0% - 100%) to be set.
176* @exception Fails is the 100% setting appears to be 0.
177*/
178int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
179{
180 uint32_t duty100;
181 uint32_t duty;
182 uint64_t tmp64;
183
184 if (hwmgr->thermal_controller.fanInfo.bNoFan)
185 return -EINVAL;
186
187 if (speed > 100)
188 speed = 100;
189
190 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
191 tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
192
193 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
194
195 if (0 == duty100)
196 return -EINVAL;
197
198 tmp64 = (uint64_t)speed * 100;
199 do_div(tmp64, duty100);
200 duty = (uint32_t)tmp64;
201
202 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
203
204 return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
205}
206
207/**
208* Reset Fan Speed to default.
209* @param hwmgr the address of the powerplay hardware manager.
210* @exception Always succeeds.
211*/
212int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
213{
214 int result;
215
216 if (hwmgr->thermal_controller.fanInfo.bNoFan)
217 return 0;
218
219 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
220 result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
221 if (0 == result)
222 result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
223 } else
224 result = tonga_fan_ctrl_set_default_mode(hwmgr);
225
226 return result;
227}
228
229/**
230* Set Fan Speed in RPM.
231* @param hwmgr the address of the powerplay hardware manager.
232* @param speed is the percentage value (min - max) to be set.
233* @exception Fails is the speed not lie between min and max.
234*/
235int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
236{
237 return 0;
238}
239
240/**
241* Reads the remote temperature from the SIslands thermal controller.
242*
243* @param hwmgr The address of the hardware manager.
244*/
245int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
246{
247 int temp;
248
249 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
250
251/* Bit 9 means the reading is lower than the lowest usable value. */
252 if (0 != (0x200 & temp))
253 temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
254 else
255 temp = (temp & 0x1ff);
256
257 temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
258
259 return temp;
260}
261
262/**
263* Set the requested temperature range for high and low alert signals
264*
265* @param hwmgr The address of the hardware manager.
266* @param range Temperature range to be programmed for high and low alert signals
267* @exception PP_Result_BadInput if the input data is not valid.
268*/
269static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
270{
271 uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
272 uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
273
274 if (low < low_temp)
275 low = low_temp;
276 if (high > high_temp)
277 high = high_temp;
278
279 if (low > high)
280 return -EINVAL;
281
282 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
283 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
284 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
285
286 return 0;
287}
288
289/**
290* Programs thermal controller one-time setting registers
291*
292* @param hwmgr The address of the hardware manager.
* @return Always 0.
293*/
294static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
295{
	/* EDGE_PER_REV is programmed as pulses-per-revolution minus one;
	 * skip entirely when the fan info reports zero pulses. */
296	if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
297		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
298			CG_TACH_CTRL, EDGE_PER_REV,
299			hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
300
	/* NOTE(review): 0x28 is a magic PWM response rate inherited from
	 * vendor code - confirm against register documentation. */
301	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
302
303	return 0;
304}
305
306/**
307* Enable thermal alerts on the Tonga thermal controller.
308*
309* @param hwmgr The address of the hardware manager.
* @return 0 on success, -1 if the SMU enable message failed.
310*/
311static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
312{
313	uint32_t alert;
314
	/* Clearing the high/low bits in THERM_INT_MASK unmasks the alert
	 * interrupts. */
315	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
316	alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
317	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
318
319	/* send message to SMU to enable internal thermal interrupts */
320	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
321}
322
323/**
324* Disable thermal alerts on the Tonga thermal controller.
325* @param hwmgr The address of the hardware manager.
* @return 0 on success, -1 if the SMU disable message failed.
326*/
327static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
328{
329	uint32_t alert;
330
	/* Setting the high/low bits in THERM_INT_MASK masks the alert
	 * interrupts. */
331	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
332	alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
333	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
334
335	/* send message to SMU to disable internal thermal interrupts */
336	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
337}
338
339/**
340* Uninitialize the thermal controller.
341* Currently just disables alerts.
342* @param hwmgr The address of the hardware manager.
343*/
344int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
345{
346 int result = tonga_thermal_disable_alert(hwmgr);
347
348 if (hwmgr->thermal_controller.fanInfo.bNoFan)
349 tonga_fan_ctrl_set_default_mode(hwmgr);
350
351 return result;
352}
353
354/**
355* Set up the fan table to control the fan using the SMC.
356* @param hwmgr the address of the powerplay hardware manager.
357* @param pInput the pointer to input data (unused)
358* @param pOutput the pointer to output data (unused)
359* @param pStorage the pointer to temporary storage (unused)
360* @param Result the last failure code (unused)
* @return Always 0; failures merely clear PHM_PlatformCaps_MicrocodeFanControl
* so the driver falls back to not using SMC fan control.
361*
362*/
363int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
364{
365	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
366	SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
367	uint32_t duty100;
368	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
369	uint16_t fdo_min, slope1, slope2;
370	uint32_t reference_clock;
371	int res;
372	uint64_t tmp64;
373
	/* Nothing to do if microcode fan control is not advertised. */
374	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
375		return 0;
376
	/* Without a fan-table SRAM address we cannot upload; drop the cap. */
377	if (0 == data->fan_table_start) {
378		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
379		return 0;
380	}
381
382	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
383
	/* A zero 100%-duty reading means the fan controller is unusable. */
384	if (0 == duty100) {
385		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
386		return 0;
387	}
388
	/* Minimum FDO duty: usPWMMin is in hundredths of a percent (/10000). */
389	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
390	do_div(tmp64, 10000);
391	fdo_min = (uint16_t)tmp64;
392
	/* Piecewise-linear fan curve: two segments (Min->Med, Med->High).
	 * NOTE(review): t_diff1/t_diff2 of zero would divide by zero below -
	 * presumably the VBIOS guarantees distinct TMin/TMed/THigh; confirm. */
393	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
394	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
395
396	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
397	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
398
	/* Slopes scaled by 16 with +50 rounding before the /100 unit change. */
399	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
400	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
401
	/* SMU fan table is big-endian; temperatures rounded to whole units. */
402	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
403	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
404	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
405
406	fan_table.Slope1 = cpu_to_be16(slope1);
407	fan_table.Slope2 = cpu_to_be16(slope2);
408
409	fan_table.FdoMin = cpu_to_be16(fdo_min);
410
411	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
412
413	fan_table.HystUp = cpu_to_be16(1);
414
415	fan_table.HystSlope = cpu_to_be16(1);
416
417	fan_table.TempRespLim = cpu_to_be16(5);
418
419	reference_clock = tonga_get_xclk(hwmgr);
420
421	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
422
423	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
424
	/* Use whichever temperature source the hardware is configured for. */
425	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
426
427	fan_table.FanControl_GL_Flag = 1;
428
	/* NOTE(review): res is currently ignored (the follow-up SMC messages
	 * below are commented out), so an upload failure is silent. */
429	res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
430/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
431	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
432		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
433				hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
434
435	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
436		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
437				hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
438
439	if (0 != res)
440		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
441*/
442	return 0;
443}
444
445/**
446* Start the fan control on the SMC.
447* @param hwmgr the address of the powerplay hardware manager.
448* @param pInput the pointer to input data
449* @param pOutput the pointer to output data
450* @param pStorage the pointer to temporary storage
451* @param Result the last failure code
452* @return result from set temperature range routine
453*/
454int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
455{
456/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
457 * Make sure that we still think controlling the fan is OK.
458*/
459 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
460 tonga_fan_ctrl_start_smc_fan_control(hwmgr);
461 tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
462 }
463
464 return 0;
465}
466
467/**
468* Set temperature range for high and low alerts
469* @param hwmgr the address of the powerplay hardware manager.
470* @param pInput the pointer to input data
471* @param pOutput the pointer to output data
472* @param pStorage the pointer to temporary storage
473* @param Result the last failure code
474* @return result from set temperature range routine
475*/
476int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
477{
478 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
479
480 if (range == NULL)
481 return -EINVAL;
482
483 return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
484}
485
/**
* Table task: program the thermal controller's one-time settings.
* @param hwmgr the address of the powerplay hardware manager.
* @param input / @param output / @param storage / @param result unused
*        task-framework parameters.
* @return result of tonga_thermal_initialize().
*/
int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	return tonga_thermal_initialize(hwmgr);
}
499
/**
* Table task: unmask the high and low thermal alert interrupts.
* @param hwmgr the address of the powerplay hardware manager.
* @param input / @param output / @param storage / @param result unused
*        task-framework parameters.
* @return result of tonga_thermal_enable_alert().
*/
int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	return tonga_thermal_enable_alert(hwmgr);
}
513
/**
* Table task: mask the high and low thermal alert interrupts.
* @param hwmgr the address of the powerplay hardware manager.
* @param input / @param output / @param storage / @param result unused
*        task-framework parameters.
* @return result of tonga_thermal_disable_alert().
*/
static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	return tonga_thermal_disable_alert(hwmgr);
}
527
/* Tasks run, in order, when the thermal controller is started.
 * The { NULL, NULL } entry terminates the list. */
528static struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
529	{ NULL, tf_tonga_thermal_initialize },
530	{ NULL, tf_tonga_thermal_set_temperature_range },
531	{ NULL, tf_tonga_thermal_enable_alert },
532/* We should restrict performance levels to low before we halt the SMC.
533 * On the other hand we are still in boot state when we do this so it would be pointless.
534 * If this assumption changes we have to revisit this table.
535 */
536	{ NULL, tf_tonga_thermal_setup_fan_table},
537	{ NULL, tf_tonga_thermal_start_smc_fan_control},
538	{ NULL, NULL }
539};
540
541static struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
542	0,
543	PHM_MasterTableFlag_None,
544	tonga_thermal_start_thermal_controller_master_list
545};
546
/* Tasks for changing the alert range: alerts are masked while the
 * thresholds are reprogrammed, then unmasked again. */
547static struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
548	{ NULL, tf_tonga_thermal_disable_alert},
549	{ NULL, tf_tonga_thermal_set_temperature_range},
550	{ NULL, tf_tonga_thermal_enable_alert},
551	{ NULL, NULL }
552};
553
554struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
555	0,
556	PHM_MasterTableFlag_None,
557	tonga_thermal_set_temperature_range_master_list
558};
559
560int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
561{
562 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
563 tonga_fan_ctrl_set_default_mode(hwmgr);
564 return 0;
565}
566
567/**
568* Initializes the thermal controller related functions in the Hardware Manager structure.
569* @param hwmgr The address of the hardware manager.
570* @exception Any error code from the low-level communication.
571*/
572int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
573{
574 int result;
575
576 result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
577
578 if (0 == result) {
579 result = phm_construct_table(hwmgr,
580 &tonga_thermal_start_thermal_controller_master,
581 &(hwmgr->start_thermal_controller));
582 if (0 != result)
583 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
584 }
585
586 if (0 == result)
587 hwmgr->fan_ctrl_is_in_default_mode = true;
588 return result;
589}
590
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
new file mode 100644
index 000000000000..aa335f267e25
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_THERMAL_H
25#define TONGA_THERMAL_H
26
27#include "hwmgr.h"
28
/* Bits set/cleared in CG_THERMAL_INT.THERM_INT_MASK to mask/unmask the
 * high and low thermal alert interrupts. */
29#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1
30#define TONGA_THERMAL_LOW_ALERT_MASK 0x2
31
/* Limits of the raw temperature sensor reading, in degrees Celsius. */
32#define TONGA_THERMAL_MINIMUM_TEMP_READING -256
33#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255
34
/* Programmable alert threshold limits, in degrees Celsius. */
35#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0
36#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255
37
/* Fan duty-cycle output (FDO) PWM control modes. */
38#define FDO_PWM_MODE_STATIC 1
39#define FDO_PWM_MODE_STATIC_RPM 5
40
41
/* PHM table tasks (tf_*) and direct thermal/fan-control entry points
 * implemented in tonga_thermal.c. */
42extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
43extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
44extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
45
46extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
47extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
48extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
49extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
50extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
51extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
52extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
53extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
54extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
55extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
56extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
57extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
58extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
59
60#endif
61
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
new file mode 100644
index 000000000000..e61a3e67852e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -0,0 +1,299 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _AMD_POWERPLAY_H_
24#define _AMD_POWERPLAY_H_
25
26#include <linux/seq_file.h>
27#include <linux/types.h>
28#include <linux/errno.h>
29#include "amd_shared.h"
30#include "cgs_common.h"
31
32enum amd_pp_event {
33 AMD_PP_EVENT_INITIALIZE = 0,
34 AMD_PP_EVENT_UNINITIALIZE,
35 AMD_PP_EVENT_POWER_SOURCE_CHANGE,
36 AMD_PP_EVENT_SUSPEND,
37 AMD_PP_EVENT_RESUME,
38 AMD_PP_EVENT_ENTER_REST_STATE,
39 AMD_PP_EVENT_EXIT_REST_STATE,
40 AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE,
41 AMD_PP_EVENT_THERMAL_NOTIFICATION,
42 AMD_PP_EVENT_VBIOS_NOTIFICATION,
43 AMD_PP_EVENT_ENTER_THERMAL_STATE,
44 AMD_PP_EVENT_EXIT_THERMAL_STATE,
45 AMD_PP_EVENT_ENTER_FORCED_STATE,
46 AMD_PP_EVENT_EXIT_FORCED_STATE,
47 AMD_PP_EVENT_ENTER_EXCLUSIVE_MODE,
48 AMD_PP_EVENT_EXIT_EXCLUSIVE_MODE,
49 AMD_PP_EVENT_ENTER_SCREEN_SAVER,
50 AMD_PP_EVENT_EXIT_SCREEN_SAVER,
51 AMD_PP_EVENT_VPU_RECOVERY_BEGIN,
52 AMD_PP_EVENT_VPU_RECOVERY_END,
53 AMD_PP_EVENT_ENABLE_POWER_PLAY,
54 AMD_PP_EVENT_DISABLE_POWER_PLAY,
55 AMD_PP_EVENT_CHANGE_POWER_SOURCE_UI_LABEL,
56 AMD_PP_EVENT_ENABLE_USER2D_PERFORMANCE,
57 AMD_PP_EVENT_DISABLE_USER2D_PERFORMANCE,
58 AMD_PP_EVENT_ENABLE_USER3D_PERFORMANCE,
59 AMD_PP_EVENT_DISABLE_USER3D_PERFORMANCE,
60 AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST,
61 AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST,
62 AMD_PP_EVENT_ENABLE_REDUCED_REFRESH_RATE,
63 AMD_PP_EVENT_DISABLE_REDUCED_REFRESH_RATE,
64 AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING,
65 AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING,
66 AMD_PP_EVENT_ENABLE_CGPG,
67 AMD_PP_EVENT_DISABLE_CGPG,
68 AMD_PP_EVENT_ENTER_TEXT_MODE,
69 AMD_PP_EVENT_EXIT_TEXT_MODE,
70 AMD_PP_EVENT_VIDEO_START,
71 AMD_PP_EVENT_VIDEO_STOP,
72 AMD_PP_EVENT_ENABLE_USER_STATE,
73 AMD_PP_EVENT_DISABLE_USER_STATE,
74 AMD_PP_EVENT_READJUST_POWER_STATE,
75 AMD_PP_EVENT_START_INACTIVITY,
76 AMD_PP_EVENT_STOP_INACTIVITY,
77 AMD_PP_EVENT_LINKED_ADAPTERS_READY,
78 AMD_PP_EVENT_ADAPTER_SAFE_TO_DISABLE,
79 AMD_PP_EVENT_COMPLETE_INIT,
80 AMD_PP_EVENT_CRITICAL_THERMAL_FAULT,
81 AMD_PP_EVENT_BACKLIGHT_CHANGED,
82 AMD_PP_EVENT_ENABLE_VARI_BRIGHT,
83 AMD_PP_EVENT_DISABLE_VARI_BRIGHT,
84 AMD_PP_EVENT_ENABLE_VARI_BRIGHT_ON_POWER_XPRESS,
85 AMD_PP_EVENT_DISABLE_VARI_BRIGHT_ON_POWER_XPRESS,
86 AMD_PP_EVENT_SET_VARI_BRIGHT_LEVEL,
87 AMD_PP_EVENT_VARI_BRIGHT_MONITOR_MEASUREMENT,
88 AMD_PP_EVENT_SCREEN_ON,
89 AMD_PP_EVENT_SCREEN_OFF,
90 AMD_PP_EVENT_PRE_DISPLAY_CONFIG_CHANGE,
91 AMD_PP_EVENT_ENTER_ULP_STATE,
92 AMD_PP_EVENT_EXIT_ULP_STATE,
93 AMD_PP_EVENT_REGISTER_IP_STATE,
94 AMD_PP_EVENT_UNREGISTER_IP_STATE,
95 AMD_PP_EVENT_ENTER_MGPU_MODE,
96 AMD_PP_EVENT_EXIT_MGPU_MODE,
97 AMD_PP_EVENT_ENTER_MULTI_GPU_MODE,
98 AMD_PP_EVENT_PRE_SUSPEND,
99 AMD_PP_EVENT_PRE_RESUME,
100 AMD_PP_EVENT_ENTER_BACOS,
101 AMD_PP_EVENT_EXIT_BACOS,
102 AMD_PP_EVENT_RESUME_BACO,
103 AMD_PP_EVENT_RESET_BACO,
104 AMD_PP_EVENT_PRE_DISPLAY_PHY_ACCESS,
105 AMD_PP_EVENT_POST_DISPLAY_PHY_CCESS,
106 AMD_PP_EVENT_START_COMPUTE_APPLICATION,
107 AMD_PP_EVENT_STOP_COMPUTE_APPLICATION,
108 AMD_PP_EVENT_REDUCE_POWER_LIMIT,
109 AMD_PP_EVENT_ENTER_FRAME_LOCK,
110 AMD_PP_EVENT_EXIT_FRAME_LOOCK,
111 AMD_PP_EVENT_LONG_IDLE_REQUEST_BACO,
112 AMD_PP_EVENT_LONG_IDLE_ENTER_BACO,
113 AMD_PP_EVENT_LONG_IDLE_EXIT_BACO,
114 AMD_PP_EVENT_HIBERNATE,
115 AMD_PP_EVENT_CONNECTED_STANDBY,
116 AMD_PP_EVENT_ENTER_SELF_REFRESH,
117 AMD_PP_EVENT_EXIT_SELF_REFRESH,
118 AMD_PP_EVENT_START_AVFS_BTC,
119 AMD_PP_EVENT_MAX
120};
121
122enum amd_dpm_forced_level {
123 AMD_DPM_FORCED_LEVEL_AUTO = 0,
124 AMD_DPM_FORCED_LEVEL_LOW = 1,
125 AMD_DPM_FORCED_LEVEL_HIGH = 2,
126};
127
128struct amd_pp_init {
129 struct cgs_device *device;
130 uint32_t chip_family;
131 uint32_t chip_id;
132 uint32_t rev_id;
133};
134enum amd_pp_display_config_type{
135 AMD_PP_DisplayConfigType_None = 0,
136 AMD_PP_DisplayConfigType_DP54 ,
137 AMD_PP_DisplayConfigType_DP432 ,
138 AMD_PP_DisplayConfigType_DP324 ,
139 AMD_PP_DisplayConfigType_DP27,
140 AMD_PP_DisplayConfigType_DP243,
141 AMD_PP_DisplayConfigType_DP216,
142 AMD_PP_DisplayConfigType_DP162,
143 AMD_PP_DisplayConfigType_HDMI6G ,
144 AMD_PP_DisplayConfigType_HDMI297 ,
145 AMD_PP_DisplayConfigType_HDMI162,
146 AMD_PP_DisplayConfigType_LVDS,
147 AMD_PP_DisplayConfigType_DVI,
148 AMD_PP_DisplayConfigType_WIRELESS,
149 AMD_PP_DisplayConfigType_VGA
150};
151
152struct single_display_configuration
153{
154 uint32_t controller_index;
155 uint32_t controller_id;
156 uint32_t signal_type;
157 uint32_t display_state;
158 /* phy id for the primary internal transmitter */
159 uint8_t primary_transmitter_phyi_d;
160 /* bitmap with the active lanes */
161 uint8_t primary_transmitter_active_lanemap;
162 /* phy id for the secondary internal transmitter (for dual-link dvi) */
163 uint8_t secondary_transmitter_phy_id;
164 /* bitmap with the active lanes */
165 uint8_t secondary_transmitter_active_lanemap;
166 /* misc phy settings for SMU. */
167 uint32_t config_flags;
168 uint32_t display_type;
169 uint32_t view_resolution_cx;
170 uint32_t view_resolution_cy;
171 enum amd_pp_display_config_type displayconfigtype;
172 uint32_t vertical_refresh; /* for active display */
173};
174
175#define MAX_NUM_DISPLAY 32
176
177struct amd_pp_display_configuration {
178 bool nb_pstate_switch_disable;/* controls NB PState switch */
179 bool cpu_cc6_disable; /* controls CPU CState switch ( on or off) */
180 bool cpu_pstate_disable;
181 uint32_t cpu_pstate_separation_time;
182
183 uint32_t num_display; /* total number of display*/
184 uint32_t num_path_including_non_display;
185 uint32_t crossfire_display_index;
186 uint32_t min_mem_set_clock;
187 uint32_t min_core_set_clock;
188 /* unit 10KHz x bit*/
189 uint32_t min_bus_bandwidth;
190 /* minimum required stutter sclk, in 10khz uint32_t ulMinCoreSetClk;*/
191 uint32_t min_core_set_clock_in_sr;
192
193 struct single_display_configuration displays[MAX_NUM_DISPLAY];
194
195 uint32_t vrefresh; /* for active display*/
196
197 uint32_t min_vblank_time; /* for active display*/
198 bool multi_monitor_in_sync;
199 /* Controller Index of primary display - used in MCLK SMC switching hang
200 * SW Workaround*/
201 uint32_t crtc_index;
202 /* htotal*1000/pixelclk - used in MCLK SMC switching hang SW Workaround*/
203 uint32_t line_time_in_us;
204 bool invalid_vblank_time;
205
206 uint32_t display_clk;
207 /*
208 * for given display configuration if multimonitormnsync == false then
209 * Memory clock DPMS with this latency or below is allowed, DPMS with
210 * higher latency not allowed.
211 */
212 uint32_t dce_tolerable_mclk_in_active_latency;
213};
214
215struct amd_pp_dal_clock_info {
216 uint32_t engine_max_clock;
217 uint32_t memory_max_clock;
218 uint32_t level;
219};
220
221enum {
222 PP_GROUP_UNKNOWN = 0,
223 PP_GROUP_GFX = 1,
224 PP_GROUP_SYS,
225 PP_GROUP_MAX
226};
227
228#define PP_GROUP_MASK 0xF0000000
229#define PP_GROUP_SHIFT 28
230
231#define PP_BLOCK_MASK 0x0FFFFF00
232#define PP_BLOCK_SHIFT 8
233
234#define PP_BLOCK_GFX_CG 0x01
235#define PP_BLOCK_GFX_MG 0x02
236#define PP_BLOCK_SYS_BIF 0x01
237#define PP_BLOCK_SYS_MC 0x02
238#define PP_BLOCK_SYS_ROM 0x04
239#define PP_BLOCK_SYS_DRM 0x08
240#define PP_BLOCK_SYS_HDP 0x10
241#define PP_BLOCK_SYS_SDMA 0x20
242
243#define PP_STATE_MASK 0x0000000F
244#define PP_STATE_SHIFT 0
245#define PP_STATE_SUPPORT_MASK 0x000000F0
246#define PP_STATE_SUPPORT_SHIFT 0
247
248#define PP_STATE_CG 0x01
249#define PP_STATE_LS 0x02
250#define PP_STATE_DS 0x04
251#define PP_STATE_SD 0x08
252#define PP_STATE_SUPPORT_CG 0x10
253#define PP_STATE_SUPPORT_LS 0x20
254#define PP_STATE_SUPPORT_DS 0x40
255#define PP_STATE_SUPPORT_SD 0x80
256
257#define PP_CG_MSG_ID(group, block, support, state) (group << PP_GROUP_SHIFT |\
258 block << PP_BLOCK_SHIFT |\
259 support << PP_STATE_SUPPORT_SHIFT |\
260 state << PP_STATE_SHIFT)
261
262struct amd_powerplay_funcs {
263 int (*get_temperature)(void *handle);
264 int (*load_firmware)(void *handle);
265 int (*wait_for_fw_loading_complete)(void *handle);
266 int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
267 enum amd_dpm_forced_level (*get_performance_level)(void *handle);
268 enum amd_pm_state_type (*get_current_power_state)(void *handle);
269 int (*get_sclk)(void *handle, bool low);
270 int (*get_mclk)(void *handle, bool low);
271 int (*powergate_vce)(void *handle, bool gate);
272 int (*powergate_uvd)(void *handle, bool gate);
273 int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
274 void *input, void *output);
275 void (*print_current_performance_level)(void *handle,
276 struct seq_file *m);
277 int (*set_fan_control_mode)(void *handle, uint32_t mode);
278 int (*get_fan_control_mode)(void *handle);
279 int (*set_fan_speed_percent)(void *handle, uint32_t percent);
280 int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
281};
282
283struct amd_powerplay {
284 void *pp_handle;
285 const struct amd_ip_funcs *ip_funcs;
286 const struct amd_powerplay_funcs *pp_funcs;
287};
288
289int amd_powerplay_init(struct amd_pp_init *pp_init,
290 struct amd_powerplay *amd_pp);
291int amd_powerplay_fini(void *handle);
292
293int amd_powerplay_display_configuration_change(void *handle, const void *input);
294
295int amd_powerplay_get_display_power_level(void *handle,
296 struct amd_pp_dal_clock_info *output);
297
298
299#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h
index 273616ab43db..9b698780aed8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h
@@ -164,6 +164,7 @@ enum DPM_ARRAY {
164#define PPSMC_MSG_SetLoggerAddressHigh ((uint16_t) 0x26C) 164#define PPSMC_MSG_SetLoggerAddressHigh ((uint16_t) 0x26C)
165#define PPSMC_MSG_SetLoggerAddressLow ((uint16_t) 0x26D) 165#define PPSMC_MSG_SetLoggerAddressLow ((uint16_t) 0x26D)
166#define PPSMC_MSG_SetWatermarkFrequency ((uint16_t) 0x26E) 166#define PPSMC_MSG_SetWatermarkFrequency ((uint16_t) 0x26E)
167#define PPSMC_MSG_SetDisplaySizePowerParams ((uint16_t) 0x26F)
167 168
168/* REMOVE LATER*/ 169/* REMOVE LATER*/
169#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) 170#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
new file mode 100644
index 000000000000..b9d84de8a44d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _EVENT_MANAGER_H_
24#define _EVENT_MANAGER_H_
25
26#include "power_state.h"
27#include "pp_power_source.h"
28#include "hardwaremanager.h"
29#include "pp_asicblocks.h"
30
31struct pp_eventmgr;
32enum amd_pp_event;
33
34enum PEM_EventDataValid {
35 PEM_EventDataValid_RequestedStateID = 0,
36 PEM_EventDataValid_RequestedUILabel,
37 PEM_EventDataValid_NewPowerState,
38 PEM_EventDataValid_RequestedPowerSource,
39 PEM_EventDataValid_RequestedClocks,
40 PEM_EventDataValid_CurrentTemperature,
41 PEM_EventDataValid_AsicBlocks,
42 PEM_EventDataValid_ODParameters,
43 PEM_EventDataValid_PXAdapterPrefs,
44 PEM_EventDataValid_PXUserPrefs,
45 PEM_EventDataValid_PXSwitchReason,
46 PEM_EventDataValid_PXSwitchPhase,
47 PEM_EventDataValid_HdVideo,
48 PEM_EventDataValid_BacklightLevel,
49 PEM_EventDatavalid_VariBrightParams,
50 PEM_EventDataValid_VariBrightLevel,
51 PEM_EventDataValid_VariBrightImmediateChange,
52 PEM_EventDataValid_PercentWhite,
53 PEM_EventDataValid_SdVideo,
54 PEM_EventDataValid_HTLinkChangeReason,
55 PEM_EventDataValid_HWBlocks,
56 PEM_EventDataValid_RequestedThermalState,
57 PEM_EventDataValid_MvcVideo,
58 PEM_EventDataValid_Max
59};
60
61typedef enum PEM_EventDataValid PEM_EventDataValid;
62
63/* Number of bits in ULONG variable */
64#define PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD (sizeof(unsigned long)*8)
65
66/* Number of ULONG entries used by event data valid bits */
67#define PEM_MAX_NUM_EVENTDATAVALID_ULONG_ENTRIES \
68 ((PEM_EventDataValid_Max + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD - 1) / \
69 PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)
70
71static inline void pem_set_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
72{
73 fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] |=
74 (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
75}
76
77static inline void pem_unset_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
78{
79 fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &=
80 ~(1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
81}
82
83static inline unsigned long pem_is_event_data_valid(const unsigned long *fields, PEM_EventDataValid valid_field)
84{
85 return fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &
86 (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
87}
88
89struct pem_event_data {
90 unsigned long valid_fields[100];
91 unsigned long requested_state_id;
92 enum PP_StateUILabel requested_ui_label;
93 struct pp_power_state *pnew_power_state;
94 enum pp_power_source requested_power_source;
95 struct PP_Clocks requested_clocks;
96 bool skip_state_adjust_rules;
97 struct phm_asic_blocks asic_blocks;
98 /* to doPP_ThermalState requestedThermalState;
99 enum ThermalStateRequestSrc requestThermalStateSrc;
100 PP_Temperature currentTemperature;*/
101
102};
103
104int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event,
105 struct pem_event_data *event_data);
106
107bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr);
108
109#endif /* _EVENT_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
new file mode 100644
index 000000000000..10437dcfd365
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
@@ -0,0 +1,125 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _EVENTMGR_H_
25#define _EVENTMGR_H_
26
27#include <linux/mutex.h>
28#include "pp_instance.h"
29#include "hardwaremanager.h"
30#include "eventmanager.h"
31#include "pp_feature.h"
32#include "pp_power_source.h"
33#include "power_state.h"
34
35typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr,
36 struct pem_event_data *event_data);
37
38struct action_chain {
39 const char *description; /* action chain description for debugging purpose */
40 const pem_event_action **action_chain; /* pointer to chain of event actions */
41};
42
43struct pem_power_source_ui_state_info {
44 enum PP_StateUILabel current_ui_label;
45 enum PP_StateUILabel default_ui_lable;
46 unsigned long configurable_ui_mapping;
47};
48
49struct pp_clock_range {
50 uint32_t min_sclk_khz;
51 uint32_t max_sclk_khz;
52
53 uint32_t min_mclk_khz;
54 uint32_t max_mclk_khz;
55
56 uint32_t min_vclk_khz;
57 uint32_t max_vclk_khz;
58
59 uint32_t min_dclk_khz;
60 uint32_t max_dclk_khz;
61
62 uint32_t min_aclk_khz;
63 uint32_t max_aclk_khz;
64
65 uint32_t min_eclk_khz;
66 uint32_t max_eclk_khz;
67};
68
69enum pp_state {
70 UNINITIALIZED,
71 INACTIVE,
72 ACTIVE
73};
74
75enum pp_ring_index {
76 PP_RING_TYPE_GFX_INDEX = 0,
77 PP_RING_TYPE_DMA_INDEX,
78 PP_RING_TYPE_DMA1_INDEX,
79 PP_RING_TYPE_UVD_INDEX,
80 PP_RING_TYPE_VCE0_INDEX,
81 PP_RING_TYPE_VCE1_INDEX,
82 PP_RING_TYPE_CP1_INDEX,
83 PP_RING_TYPE_CP2_INDEX,
84 PP_NUM_RINGS,
85};
86
87struct pp_request {
88 uint32_t flags;
89 uint32_t sclk;
90 uint32_t sclk_throttle;
91 uint32_t mclk;
92 uint32_t vclk;
93 uint32_t dclk;
94 uint32_t eclk;
95 uint32_t aclk;
96 uint32_t iclk;
97 uint32_t vp8clk;
98 uint32_t rsv[32];
99};
100
101struct pp_eventmgr {
102 struct pp_hwmgr *hwmgr;
103 struct pp_smumgr *smumgr;
104
105 struct pp_feature_info features[PP_Feature_Max];
106 const struct action_chain *event_chain[AMD_PP_EVENT_MAX];
107 struct phm_platform_descriptor *platform_descriptor;
108 struct pp_clock_range clock_range;
109 enum pp_power_source current_power_source;
110 struct pem_power_source_ui_state_info ui_state_info[PP_PowerSource_Max];
111 enum pp_state states[PP_NUM_RINGS];
112 struct pp_request hi_req;
113 struct list_head context_list;
114 struct mutex lock;
115 bool block_adjust_power_state;
116 bool enable_cg;
117 bool enable_gfx_cgpg;
118 int (*pp_eventmgr_init)(struct pp_eventmgr *eventmgr);
119 void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr);
120};
121
122int eventmgr_init(struct pp_instance *handle);
123int eventmgr_fini(struct pp_eventmgr *eventmgr);
124
125#endif /* _EVENTMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h
new file mode 100644
index 000000000000..7ae494569a60
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h
@@ -0,0 +1,412 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24
25#ifndef _FIJI_PP_SMC_H_
26#define _FIJI_PP_SMC_H_
27
28#pragma pack(push, 1)
29
30#define PPSMC_SWSTATE_FLAG_DC 0x01
31#define PPSMC_SWSTATE_FLAG_UVD 0x02
32#define PPSMC_SWSTATE_FLAG_VCE 0x04
33
34#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
35#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
36#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
37
38#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
39#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
40#define PPSMC_SYSTEMFLAG_GDDR5 0x04
41
42#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
43
44#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
45#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
46
47#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
48#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
49
50#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
51#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
52
53/* Defines for DPM 2.0 */
54#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
55#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
56#define PPSMC_DPM2FLAGS_OCP 0x04
57
58/* Defines for display watermark level */
59#define PPSMC_DISPLAY_WATERMARK_LOW 0
60#define PPSMC_DISPLAY_WATERMARK_HIGH 1
61
62/* In the HW performance level's state flags: */
63#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
64#define PPSMC_STATEFLAG_POWERBOOST 0x02
65#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
66#define PPSMC_STATEFLAG_POWERSHIFT 0x08
67#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
68#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
69#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
70
71/* Fan control algorithm: */
72#define FDO_MODE_HARDWARE 0
73#define FDO_MODE_PIECE_WISE_LINEAR 1
74
75enum FAN_CONTROL {
76 FAN_CONTROL_FUZZY,
77 FAN_CONTROL_TABLE
78};
79
80/* Gemini Modes*/
81#define PPSMC_GeminiModeNone 0 /*Single GPU board*/
82#define PPSMC_GeminiModeMaster 1 /*Master GPU on a Gemini board*/
83#define PPSMC_GeminiModeSlave 2 /*Slave GPU on a Gemini board*/
84
85
86/* Return codes for driver to SMC communication. */
87#define PPSMC_Result_OK ((uint16_t)0x01)
88#define PPSMC_Result_NoMore ((uint16_t)0x02)
89
90#define PPSMC_Result_NotNow ((uint16_t)0x03)
91
92#define PPSMC_Result_Failed ((uint16_t)0xFF)
93#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
94#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
95
96#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
97
98
99#define PPSMC_MSG_Halt ((uint16_t)0x10)
100#define PPSMC_MSG_Resume ((uint16_t)0x11)
101#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
102#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
103#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
104#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
105#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
106#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
107#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
108#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
109#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
110#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
111
112#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
113#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
114#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
115#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
116#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
117
118#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
119#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
120#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
121#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
122#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
123#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
124#define PPSMC_CACHistoryStart ((uint16_t)0x57)
125#define PPSMC_CACHistoryStop ((uint16_t)0x58)
126#define PPSMC_TDPClampingActive ((uint16_t)0x59)
127#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
128#define PPSMC_StartFanControl ((uint16_t)0x5B)
129#define PPSMC_StopFanControl ((uint16_t)0x5C)
130#define PPSMC_NoDisplay ((uint16_t)0x5D)
131#define PPSMC_HasDisplay ((uint16_t)0x5E)
132#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
133#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
134#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
135#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
136#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
137#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
138#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
139#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
140#define PPSMC_OCPActive ((uint16_t)0x6C)
141#define PPSMC_OCPInactive ((uint16_t)0x6D)
142#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
143#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
144#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
145#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
146#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
147#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
148#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
149#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
150#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
151#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
152#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
153#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
154#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
155#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
156#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
157#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
158
159#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
160#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
161#define PPSMC_FlushDataCache ((uint16_t)0x80)
162#define PPSMC_FlushInstrCache ((uint16_t)0x81)
163
164#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
165#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
166
167#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
168
169#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
170#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
171#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
172#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
173
174#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
175
176#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
177
178/* Trinity Specific Messages*/
179#define PPSMC_MSG_Test ((uint16_t) 0x100)
180#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
181#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
182#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
183#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
184#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
185#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
186#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
187#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
188#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
189#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
190#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
191#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
192#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
193#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
194#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
195#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
196#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
197#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
198#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
199#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
200#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
201#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
202#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
203#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
204#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
205#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
206#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
207#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
208#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
209#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
210#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
211#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
212#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
213#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
214#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
215#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
216
217#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
218#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
219#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
220#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
221#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
222#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
223#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
224#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
225#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
226#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
227#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
228#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
229#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
230#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
231#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
232#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
233#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
234#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
235#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
236#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
237#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
238#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
239#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
240#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
241#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
242#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
243#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
244#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
245#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
246#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
247#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
248#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
249#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
250#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
251#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
252
253#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
254#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
255
256#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
257#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
258#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
259#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
260#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
261#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
262#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
263#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
264#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
265#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
266#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
267#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
268#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
269#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
270#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
271#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
272#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
273#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
274#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
275#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
276#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
277#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
278#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
279#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
280#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
281#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
282#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
283#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
284#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
285#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
286#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
287#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
288#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
289#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
290#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
291#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
292#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
293#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
294#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
295#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
296#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
297#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
298#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
299#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
300#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
301#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
302#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
303#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
304#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
305#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
306#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
307#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
308#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
309#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
310#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
311#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
312#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
313#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
314#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
315#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
316#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
317#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
318#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
319#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
320#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
321#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
322#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
323#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
324#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
325#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
326#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
327#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
328#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
329#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
330#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
331#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
332#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
333#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
334#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
335#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
336#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
337
338#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
339#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
340#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
341#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
342#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
343#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
344#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
345#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
346#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
347
348#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
349#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
350#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
351#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
352#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
353#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
354#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
355
356#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
357#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
358#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
359#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
360#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
361#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
362#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
363#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
364#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
365#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
366#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
367#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
368#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
369#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
370#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
371#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
372#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
373#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
374#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
375#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
376#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
377#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
378#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
379#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
380#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
381#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
382#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
383#define PPSMC_MSG_GetHbmCode ((uint16_t) 0x26D)
384#define PPSMC_MSG_GetVrVddcTemperature ((uint16_t) 0x26E)
385#define PPSMC_MSG_GetVrMvddTemperature ((uint16_t) 0x26F)
386#define PPSMC_MSG_GetLiquidTemperature ((uint16_t) 0x270)
387#define PPSMC_MSG_GetPlxTemperature ((uint16_t) 0x271)
388#define PPSMC_MSG_RequestI2CControl ((uint16_t) 0x272)
389#define PPSMC_MSG_ReleaseI2CControl ((uint16_t) 0x273)
390#define PPSMC_MSG_LedConfig ((uint16_t) 0x274)
391#define PPSMC_MSG_SetHbmFanCode ((uint16_t) 0x275)
392#define PPSMC_MSG_SetHbmThrottleCode ((uint16_t) 0x276)
393
394#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
395#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
396#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
397#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
398#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
399
400/* AVFS Only - Remove Later */
401#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x666)
402
403/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/
404#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
405#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
406#define PPSMC_EVENT_STATUS_DC 0x00000004
407
408typedef uint16_t PPSMC_Msg;
409
410#pragma pack(pop)
411
412#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
new file mode 100644
index 000000000000..0262ad35502a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
@@ -0,0 +1,10299 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _FIJI_PWRVIRUS_H_
24#define _FIJI_PWRVIRUS_H_
25
26#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
27#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
28#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
29#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d
30
31enum PWR_Command
32{
33 PwrCmdNull = 0,
34 PwrCmdWrite,
35 PwrCmdEnd,
36 PwrCmdMax
37};
38typedef enum PWR_Command PWR_Command;
39
40struct PWR_Command_Table
41{
42 PWR_Command command;
43 ULONG data;
44 ULONG reg;
45};
46typedef struct PWR_Command_Table PWR_Command_Table;
47
48#define PWR_VIRUS_TABLE_SIZE 10243
49static PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] =
50{
51 { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX },
52 { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
53 { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX },
54 { PwrCmdWrite, 0x0300078c, mmPCIE_DATA },
55 { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL },
56 { PwrCmdWrite, 0x00000001, mmBIF_CLK_CTRL },
57 { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL },
58 { PwrCmdWrite, 0x00000003, mmBIF_FB_EN },
59 { PwrCmdWrite, 0x00000000, mmBIF_FB_EN },
60 { PwrCmdWrite, 0x00000001, mmBIF_DOORBELL_APER_EN },
61 { PwrCmdWrite, 0x00000000, mmBIF_DOORBELL_APER_EN },
62 { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
63 { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
64 { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
65 { PwrCmdWrite, 0x22000000, mmPCIE_DATA },
66 { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
67 { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
68 /*
69 { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
70 { PwrCmdWrite, 0x00000000, mmMC_CITF_CNTL },
71 { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
72 { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
73 { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
74 { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
75 { PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET },*/
76 { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_LO },
77 { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_HI },
78 { PwrCmdWrite, 0x00000000, mmRLC_CSIB_LENGTH },
79 /*
80 { PwrCmdWrite, 0x00000000, mmMC_VM_MX_L1_TLB_CNTL },
81 { PwrCmdWrite, 0x00000001, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR },
82 { PwrCmdWrite, 0x00000000, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR },
83 { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
84 { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },*/
85 { PwrCmdWrite, 0x00000000, mmVM_CONTEXT0_CNTL },
86 { PwrCmdWrite, 0x00000000, mmVM_CONTEXT1_CNTL },
87 /*
88 { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_BASE },
89 { PwrCmdWrite, 0x00000002, mmMC_VM_AGP_BOT },
90 { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_TOP },*/
91 { PwrCmdWrite, 0x04000000, mmATC_VM_APERTURE0_LOW_ADDR },
92 { PwrCmdWrite, 0x0400ff20, mmATC_VM_APERTURE0_HIGH_ADDR },
93 { PwrCmdWrite, 0x00000002, mmATC_VM_APERTURE0_CNTL },
94 { PwrCmdWrite, 0x0000ffff, mmATC_VM_APERTURE0_CNTL2 },
95 { PwrCmdWrite, 0x00000001, mmATC_VM_APERTURE1_LOW_ADDR },
96 { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_HIGH_ADDR },
97 { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL },
98 { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL2 },
99 //{ PwrCmdWrite, 0x00000000, mmMC_ARB_RAMCFG },
100 { PwrCmdWrite, 0x12011003, mmGB_ADDR_CONFIG },
101 { PwrCmdWrite, 0x00800010, mmGB_TILE_MODE0 },
102 { PwrCmdWrite, 0x00800810, mmGB_TILE_MODE1 },
103 { PwrCmdWrite, 0x00801010, mmGB_TILE_MODE2 },
104 { PwrCmdWrite, 0x00801810, mmGB_TILE_MODE3 },
105 { PwrCmdWrite, 0x00802810, mmGB_TILE_MODE4 },
106 { PwrCmdWrite, 0x00802808, mmGB_TILE_MODE5 },
107 { PwrCmdWrite, 0x00802814, mmGB_TILE_MODE6 },
108 { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE7 },
109 { PwrCmdWrite, 0x00000004, mmGB_TILE_MODE8 },
110 { PwrCmdWrite, 0x02000008, mmGB_TILE_MODE9 },
111 { PwrCmdWrite, 0x02000010, mmGB_TILE_MODE10 },
112 { PwrCmdWrite, 0x06000014, mmGB_TILE_MODE11 },
113 { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE12 },
114 { PwrCmdWrite, 0x02400008, mmGB_TILE_MODE13 },
115 { PwrCmdWrite, 0x02400010, mmGB_TILE_MODE14 },
116 { PwrCmdWrite, 0x02400030, mmGB_TILE_MODE15 },
117 { PwrCmdWrite, 0x06400014, mmGB_TILE_MODE16 },
118 { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE17 },
119 { PwrCmdWrite, 0x0040000c, mmGB_TILE_MODE18 },
120 { PwrCmdWrite, 0x0100000c, mmGB_TILE_MODE19 },
121 { PwrCmdWrite, 0x0100001c, mmGB_TILE_MODE20 },
122 { PwrCmdWrite, 0x01000034, mmGB_TILE_MODE21 },
123 { PwrCmdWrite, 0x01000024, mmGB_TILE_MODE22 },
124 { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE23 },
125 { PwrCmdWrite, 0x0040001c, mmGB_TILE_MODE24 },
126 { PwrCmdWrite, 0x01000020, mmGB_TILE_MODE25 },
127 { PwrCmdWrite, 0x01000038, mmGB_TILE_MODE26 },
128 { PwrCmdWrite, 0x02c00008, mmGB_TILE_MODE27 },
129 { PwrCmdWrite, 0x02c00010, mmGB_TILE_MODE28 },
130 { PwrCmdWrite, 0x06c00014, mmGB_TILE_MODE29 },
131 { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE30 },
132 { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE31 },
133 { PwrCmdWrite, 0x000000a8, mmGB_MACROTILE_MODE0 },
134 { PwrCmdWrite, 0x000000a4, mmGB_MACROTILE_MODE1 },
135 { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE2 },
136 { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE3 },
137 { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE4 },
138 { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE5 },
139 { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE6 },
140 { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE7 },
141 { PwrCmdWrite, 0x000000ee, mmGB_MACROTILE_MODE8 },
142 { PwrCmdWrite, 0x000000ea, mmGB_MACROTILE_MODE9 },
143 { PwrCmdWrite, 0x000000e9, mmGB_MACROTILE_MODE10 },
144 { PwrCmdWrite, 0x000000e5, mmGB_MACROTILE_MODE11 },
145 { PwrCmdWrite, 0x000000e4, mmGB_MACROTILE_MODE12 },
146 { PwrCmdWrite, 0x000000e0, mmGB_MACROTILE_MODE13 },
147 { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE14 },
148 { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE15 },
149 { PwrCmdWrite, 0x00900000, mmHDP_NONSURFACE_BASE },
150 { PwrCmdWrite, 0x00008000, mmHDP_NONSURFACE_INFO },
151 { PwrCmdWrite, 0x3fffffff, mmHDP_NONSURFACE_SIZE },
152 { PwrCmdWrite, 0x00000003, mmBIF_FB_EN },
153 //{ PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET },
154 { PwrCmdWrite, 0x00000000, mmSRBM_CNTL },
155 { PwrCmdWrite, 0x00020000, mmSRBM_CNTL },
156 { PwrCmdWrite, 0x80000000, mmATC_VMID0_PASID_MAPPING },
157 { PwrCmdWrite, 0x00000000, mmATC_VMID_PASID_MAPPING_UPDATE_STATUS },
158 { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
159 { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
160 { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
161 { PwrCmdWrite, 0xe0000000, mmGRBM_GFX_INDEX },
162 { PwrCmdWrite, 0x00000000, mmCGTS_TCC_DISABLE },
163 { PwrCmdWrite, 0x00000000, mmTCP_ADDR_CONFIG },
164 { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
165 { PwrCmdWrite, 0x76543210, mmTCP_CHAN_STEER_LO },
166 { PwrCmdWrite, 0xfedcba98, mmTCP_CHAN_STEER_HI },
167 { PwrCmdWrite, 0x00000000, mmDB_DEBUG2 },
168 { PwrCmdWrite, 0x00000000, mmDB_DEBUG },
169 { PwrCmdWrite, 0x00002b16, mmCP_QUEUE_THRESHOLDS },
170 { PwrCmdWrite, 0x00006030, mmCP_MEQ_THRESHOLDS },
171 { PwrCmdWrite, 0x01000104, mmSPI_CONFIG_CNTL_1 },
172 { PwrCmdWrite, 0x98184020, mmPA_SC_FIFO_SIZE },
173 { PwrCmdWrite, 0x00000001, mmVGT_NUM_INSTANCES },
174 { PwrCmdWrite, 0x00000000, mmCP_PERFMON_CNTL },
175 { PwrCmdWrite, 0x01180000, mmSQ_CONFIG },
176 { PwrCmdWrite, 0x00000000, mmVGT_CACHE_INVALIDATION },
177 { PwrCmdWrite, 0x00000000, mmSQ_THREAD_TRACE_BASE },
178 { PwrCmdWrite, 0x0000df80, mmSQ_THREAD_TRACE_MASK },
179 { PwrCmdWrite, 0x02249249, mmSQ_THREAD_TRACE_MODE },
180 { PwrCmdWrite, 0x00000000, mmPA_SC_LINE_STIPPLE_STATE },
181 { PwrCmdWrite, 0x00000000, mmCB_PERFCOUNTER0_SELECT1 },
182 { PwrCmdWrite, 0x06000100, mmCGTT_VGT_CLK_CTRL },
183 { PwrCmdWrite, 0x00000007, mmPA_CL_ENHANCE },
184 { PwrCmdWrite, 0x00000001, mmPA_SC_ENHANCE },
185 { PwrCmdWrite, 0x00ffffff, mmPA_SC_FORCE_EOV_MAX_CNTS },
186 { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
187 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
188 { PwrCmdWrite, 0x00000010, mmSRBM_GFX_CNTL },
189 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
190 { PwrCmdWrite, 0x00000020, mmSRBM_GFX_CNTL },
191 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
192 { PwrCmdWrite, 0x00000030, mmSRBM_GFX_CNTL },
193 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
194 { PwrCmdWrite, 0x00000040, mmSRBM_GFX_CNTL },
195 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
196 { PwrCmdWrite, 0x00000050, mmSRBM_GFX_CNTL },
197 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
198 { PwrCmdWrite, 0x00000060, mmSRBM_GFX_CNTL },
199 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
200 { PwrCmdWrite, 0x00000070, mmSRBM_GFX_CNTL },
201 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
202 { PwrCmdWrite, 0x00000080, mmSRBM_GFX_CNTL },
203 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
204 { PwrCmdWrite, 0x00000090, mmSRBM_GFX_CNTL },
205 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
206 { PwrCmdWrite, 0x000000a0, mmSRBM_GFX_CNTL },
207 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
208 { PwrCmdWrite, 0x000000b0, mmSRBM_GFX_CNTL },
209 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
210 { PwrCmdWrite, 0x000000c0, mmSRBM_GFX_CNTL },
211 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
212 { PwrCmdWrite, 0x000000d0, mmSRBM_GFX_CNTL },
213 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
214 { PwrCmdWrite, 0x000000e0, mmSRBM_GFX_CNTL },
215 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
216 { PwrCmdWrite, 0x000000f0, mmSRBM_GFX_CNTL },
217 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
218 { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
219 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
220 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
221 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
222 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
223 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
224 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
225 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
226 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
227 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
228 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
229 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
230 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
231 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
232 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
233 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
234 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
235 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
236 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
237 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
238 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
239 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
240 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
241 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
242 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
243 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
244 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
245 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
246 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
247 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
248 { PwrCmdWrite, 0x00000000, mmRLC_PG_CNTL },
249 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS2 },
250 { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
251 { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
252 { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
253 { PwrCmdWrite, 0x0000000e, mmSH_MEM_APE1_BASE },
254 { PwrCmdWrite, 0x0000020d, mmSH_MEM_APE1_LIMIT },
255 { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
256 { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
257 { PwrCmdWrite, 0x00000000, mmSH_MEM_CONFIG },
258 { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
259 { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
260 { PwrCmdWrite, 0x00000000, mmCP_RB_VMID },
261 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
262 { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
263 { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
264 { PwrCmdWrite, 0x00000000, mmRLC_SRM_CNTL },
265 { PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL },
266 { PwrCmdWrite, 0x00000000, mmCP_ME_CNTL },
267 { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
268 { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
269 { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
270 { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
271 { PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL },
272 { PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL },
273 { PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE },
274 { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
275 { PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO },
276 { PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI },
277 { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
278 { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
279 { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
280 { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
281 { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
282 { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
283 { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
284 { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
285 { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
286 { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
287 { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
288 { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
289 { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
290 { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
291 { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
292 { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
293 { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
294 { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
295 { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
296 { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
297 { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
298 { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
299 { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
300 { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
301 { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
302 { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
303 { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
304 { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
305 { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
306 { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
307 { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
308 { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
309 { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
310 { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
311 { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
312 { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
313 { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
314 { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
315 { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
316 { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
317 { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
318 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
319 { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
320 { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
321 { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
322 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
323 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
324 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
325 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
326 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
327 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
328 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
329 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
330 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
331 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
332 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
333 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
334 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
335 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
336 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
337 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
338 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
339 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
340 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
341 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
342 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
343 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
344 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
345 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
346 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
347 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
348 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
349 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
350 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
351 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
352 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
353 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
354 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
355 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
356 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
357 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
358 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
359 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
360 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
361 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
362 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
363 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
364 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
365 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
366 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
367 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
368 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
369 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
370 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
371 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
372 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
373 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
374 { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
375 { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
376 { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
377 { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
378 { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
379 { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
380 { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
381 { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
382 { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
383 { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
384 { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
385 { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
386 { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
387 { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
388 { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
389 { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
390 { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
391 { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
392 { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
393 { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
394 { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
395 { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
396 { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
397 { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
398 { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
399 { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
400 { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
401 { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
402 { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
403 { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
404 { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
405 { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
406 { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
407 { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
408 { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
409 { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
410 { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
411 { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
412 { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
413 { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
414 { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
415 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
416 { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
417 { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
418 { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
419 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
420 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
421 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
422 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
423 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
424 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
425 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
426 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
427 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
428 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
429 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
430 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
431 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
432 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
433 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
434 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
435 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
436 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
437 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
438 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
439 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
440 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
441 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
442 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
443 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
444 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
445 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
446 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
447 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
448 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
449 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
450 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
451 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
452 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
453 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
454 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
455 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
456 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
457 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
458 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
459 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
460 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
461 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
462 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
463 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
464 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
465 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
466 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
467 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
468 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
469 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
470 { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
471 { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
472 { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
473 { PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO },
474 { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
475 { PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0 },
476 { PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0 },
477 { PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0 },
478 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
479 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
480 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
481 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
482 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
483 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
484 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
485 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
486 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
487 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
488 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
489 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
490 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
491 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
492 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
493 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
494 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
495 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
496 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
497 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
498 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
499 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
500 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
501 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
502 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
503 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
504 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
505 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
506 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
507 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
508 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
509 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
510 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
511 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
512 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
513 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
514 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
515 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
516 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
517 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
518 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
519 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
520 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
521 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
522 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
523 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
524 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
525 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
526 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
527 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
528 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
529 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
530 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
531 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
532 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
533 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
534 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
535 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
536 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
537 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
538 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
539 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
540 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
541 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
542 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
543 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
544 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
545 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
546 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
547 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
548 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
549 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
550 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
551 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
552 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
553 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
554 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
555 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
556 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
557 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
558 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
559 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
560 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
561 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
562 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
563 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
564 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
565 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
566 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
567 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
568 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
569 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
570 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
571 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
572 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
573 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
574 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
575 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
576 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
577 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
578 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
579 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
580 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
581 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
582 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
583 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
584 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
585 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
586 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
587 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
588 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
589 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
590 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
591 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
592 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
593 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
594 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
595 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
596 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
597 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
598 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
599 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
600 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
601 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
602 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
603 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
604 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
605 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
606 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
607 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
608 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
609 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
610 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
611 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
612 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
613 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
614 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
615 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
616 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
617 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
618 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
619 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
620 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
621 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
622 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
623 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
624 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
625 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
626 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
627 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
628 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
629 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
630 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
631 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
632 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
633 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
634 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
635 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
636 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
637 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
638 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
639 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
640 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
641 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
642 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
643 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
644 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
645 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
646 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
647 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
648 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
649 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
650 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
651 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
652 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
653 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
654 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
655 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
656 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
657 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
658 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
659 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
660 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
661 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
662 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
663 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
664 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
665 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
666 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
667 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
668 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
669 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
670 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
671 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
672 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
673 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
674 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
675 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
676 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
677 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
678 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
679 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
680 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
681 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
682 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
683 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
684 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
685 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
686 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
687 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
688 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
689 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
690 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
691 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
692 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
693 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
694 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
695 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
696 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
697 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
698 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
699 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
700 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
701 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
702 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
703 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
704 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
705 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
706 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
707 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
708 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
709 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
710 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
711 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
712 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
713 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
714 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
715 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
716 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
717 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
718 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
719 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
720 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
721 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
722 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
723 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
724 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
725 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
726 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
727 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
728 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
729 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
730 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
731 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
732 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
733 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
734 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
735 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
736 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
737 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
738 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
739 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
740 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
741 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
742 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
743 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
744 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
745 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
746 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
747 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
748 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
749 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
750 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
751 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
752 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
753 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
754 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
755 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
756 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
757 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
758 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
759 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
760 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
761 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
762 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
763 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
764 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
765 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
766 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
767 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
768 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
769 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
770 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
771 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
772 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
773 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
774 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
775 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
776 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
777 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
778 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
779 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
780 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
781 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
782 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
783 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
784 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
785 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
786 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
787 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
788 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
789 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
790 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
791 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
792 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
793 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
794 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
795 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
796 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
797 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
798 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
799 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
800 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
801 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
802 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
803 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
804 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
805 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
806 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
807 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
808 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
809 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
810 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
811 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
812 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
813 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
814 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
815 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
816 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
817 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
818 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
819 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
820 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
821 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
822 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
823 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
824 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
825 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
826 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
827 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
828 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
829 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
830 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
831 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
832 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
833 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
834 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
835 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
836 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
837 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
838 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
839 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
840 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
841 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
842 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
843 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
844 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
845 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
846 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
847 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
848 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
849 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
850 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
851 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
852 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
853 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
854 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
855 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
856 { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
857 { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
858 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
859 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
860 { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
861 { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
862 { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
863 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
864 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
865 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
866 { PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0 },
867 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
868 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
869 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
870 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
871 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
872 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
873 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
874 { PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0 },
875 { PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0 },
876 { PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0 },
877 { PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0 },
878 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
879 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
880 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
881 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
882 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
883 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
884 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
885 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
886 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
887 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
888 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
889 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
890 { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
891 { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
892 { PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO },
893 { PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0 },
894 { PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0 },
895 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
896 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
897 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
898 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
899 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
900 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
901 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
902 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
903 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
904 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
905 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
906 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
907 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
908 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
909 { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
910 { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
911 { PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO },
912 { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
913 { PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0 },
914 { PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0 },
915 { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
916 { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
917 { PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0 },
918 { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
919 { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
920 { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
921 { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
922 { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
923 { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
924 { PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0 },
925 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
926 { PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0 },
927 { PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0 },
928 { PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0 },
929 { PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0 },
930 { PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0 },
931 { PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0 },
932 { PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0 },
933 { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
934 { PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0 },
935 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
936 { PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0 },
937 { PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0 },
938 { PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0 },
939 { PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0 },
940 { PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0 },
941 { PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0 },
942 { PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0 },
943 { PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0 },
944 { PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0 },
945 { PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0 },
946 { PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0 },
947 { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
948 { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
949 { PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0 },
950 { PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0 },
951 { PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0 },
952 { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
953 { PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0 },
954 { PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0 },
955 { PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0 },
956 { PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0 },
957 { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
958 { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
959 { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
960 { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
961 { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
962 { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
963 { PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0 },
964 { PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0 },
965 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
966 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
967 { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
968 { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
969 { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
970 { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
971 { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
972 { PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0 },
973 { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
974 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
975 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
976 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
977 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
978 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
979 { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
980 { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
981 { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
982 { PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0 },
983 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
984 { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
985 { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
986 { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
987 { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
988 { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
989 { PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0 },
990 { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
991 { PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0 },
992 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
993 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
994 { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
995 { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
996 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
997 { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
998 { PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0 },
999 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1000 { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
1001 { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
1002 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
1003 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
1004 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
1005 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
1006 { PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0 },
1007 { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
1008 { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
1009 { PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0 },
1010 { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
1011 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
1012 { PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0 },
1013 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1014 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1015 { PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0 },
1016 { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
1017 { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
1018 { PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0 },
1019 { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
1020 { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
1021 { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
1022 { PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0 },
1023 { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
1024 { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
1025 { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
1026 { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
1027 { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
1028 { PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0 },
1029 { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
1030 { PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0 },
1031 { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
1032 { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
1033 { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
1034 { PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0 },
1035 { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
1036 { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
1037 { PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0 },
1038 { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
1039 { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
1040 { PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0 },
1041 { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
1042 { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
1043 { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
1044 { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
1045 { PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0 },
1046 { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
1047 { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
1048 { PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0 },
1049 { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
1050 { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
1051 { PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0 },
1052 { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
1053 { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
1054 { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
1055 { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
1056 { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
1057 { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
1058 { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
1059 { PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0 },
1060 { PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0 },
1061 { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
1062 { PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0 },
1063 { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
1064 { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
1065 { PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0 },
1066 { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
1067 { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
1068 { PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0 },
1069 { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
1070 { PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0 },
1071 { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
1072 { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
1073 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
1074 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
1075 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
1076 { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
1077 { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
1078 { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
1079 { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
1080 { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
1081 { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
1082 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1083 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
1084 { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
1085 { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
1086 { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
1087 { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
1088 { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
1089 { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
1090 { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
1091 { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
1092 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
1093 { PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0 },
1094 { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
1095 { PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0 },
1096 { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
1097 { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
1098 { PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0 },
1099 { PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0 },
1100 { PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0 },
1101 { PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0 },
1102 { PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0 },
1103 { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
1104 { PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0 },
1105 { PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0 },
1106 { PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0 },
1107 { PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0 },
1108 { PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0 },
1109 { PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0 },
1110 { PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0 },
1111 { PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0 },
1112 { PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0 },
1113 { PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0 },
1114 { PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0 },
1115 { PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0 },
1116 { PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0 },
1117 { PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0 },
1118 { PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0 },
1119 { PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0 },
1120 { PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0 },
1121 { PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0 },
1122 { PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0 },
1123 { PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0 },
1124 { PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0 },
1125 { PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0 },
1126 { PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0 },
1127 { PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0 },
1128 { PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0 },
1129 { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
1130 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1131 { PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0 },
1132 { PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0 },
1133 { PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0 },
1134 { PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0 },
1135 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1136 { PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0 },
1137 { PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0 },
1138 { PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0 },
1139 { PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0 },
1140 { PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0 },
1141 { PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0 },
1142 { PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0 },
1143 { PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0 },
1144 { PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0 },
1145 { PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0 },
1146 { PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0 },
1147 { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
1148 { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
1149 { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
1150 { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
1151 { PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0 },
1152 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
1153 { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
1154 { PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0 },
1155 { PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0 },
1156 { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
1157 { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
1158 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1159 { PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0 },
1160 { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
1161 { PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0 },
1162 { PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0 },
1163 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1164 { PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0 },
1165 { PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0 },
1166 { PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0 },
1167 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1168 { PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0 },
1169 { PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0 },
1170 { PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0 },
1171 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1172 { PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0 },
1173 { PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0 },
1174 { PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0 },
1175 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1176 { PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0 },
1177 { PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0 },
1178 { PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0 },
1179 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1180 { PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0 },
1181 { PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0 },
1182 { PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0 },
1183 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1184 { PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0 },
1185 { PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0 },
1186 { PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0 },
1187 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1188 { PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0 },
1189 { PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0 },
1190 { PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0 },
1191 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1192 { PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0 },
1193 { PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0 },
1194 { PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0 },
1195 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
1196 { PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0 },
1197 { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
1198 { PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0 },
1199 { PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0 },
1200 { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
1201 { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
1202 { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
1203 { PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0 },
1204 { PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0 },
1205 { PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0 },
1206 { PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0 },
1207 { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
1208 { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
1209 { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
1210 { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
1211 { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
1212 { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
1213 { PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0 },
1214 { PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0 },
1215 { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
1216 { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
1217 { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
1218 { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
1219 { PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0 },
1220 { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
1221 { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
1222 { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
1223 { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
1224 { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
1225 { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
1226 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1227 { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
1228 { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
1229 { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
1230 { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
1231 { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
1232 { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
1233 { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
1234 { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
1235 { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
1236 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1237 { PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0 },
1238 { PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0 },
1239 { PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0 },
1240 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
1241 { PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0 },
1242 { PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0 },
1243 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
1244 { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
1245 { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
1246 { PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0 },
1247 { PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0 },
1248 { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
1249 { PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0 },
1250 { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
1251 { PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0 },
1252 { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
1253 { PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0 },
1254 { PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0 },
1255 { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
1256 { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
1257 { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
1258 { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
1259 { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
1260 { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
1261 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
1262 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
1263 { PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0 },
1264 { PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0 },
1265 { PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0 },
1266 { PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0 },
1267 { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
1268 { PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0 },
1269 { PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0 },
1270 { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
1271 { PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0 },
1272 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
1273 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
1274 { PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0 },
1275 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1276 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
1277 { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
1278 { PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0 },
1279 { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
1280 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
1281 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
1282 { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
1283 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
1284 { PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0 },
1285 { PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0 },
1286 { PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0 },
1287 { PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0 },
1288 { PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0 },
1289 { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
1290 { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
1291 { PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0 },
1292 { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
1293 { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
1294 { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
1295 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
1296 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
1297 { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
1298 { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
1299 { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
1300 { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
1301 { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
1302 { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
1303 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
1304 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
1305 { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
1306 { PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0 },
1307 { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
1308 { PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0 },
1309 { PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0 },
1310 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
1311 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
1312 { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
1313 { PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0 },
1314 { PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0 },
1315 { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
1316 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
1317 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
1318 { PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0 },
1319 { PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0 },
1320 { PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0 },
1321 { PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0 },
1322 { PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0 },
1323 { PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0 },
1324 { PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0 },
1325 { PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0 },
1326 { PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0 },
1327 { PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0 },
1328 { PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0 },
1329 { PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0 },
1330 { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
1331 { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
1332 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
1333 { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
1334 { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
1335 { PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0 },
1336 { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
1337 { PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0 },
1338 { PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0 },
1339 { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
1340 { PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0 },
1341 { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
1342 { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
1343 { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
1344 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1345 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
1346 { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
1347 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1348 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
1349 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
1350 { PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0 },
1351 { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
1352 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
1353 { PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0 },
1354 { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
1355 { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
1356 { PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0 },
1357 { PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0 },
1358 { PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0 },
1359 { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
1360 { PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0 },
1361 { PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0 },
1362 { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
1363 { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
1364 { PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0 },
1365 { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
1366 { PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0 },
1367 { PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0 },
1368 { PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0 },
1369 { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
1370 { PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0 },
1371 { PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0 },
1372 { PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0 },
1373 { PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0 },
1374 { PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0 },
1375 { PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0 },
1376 { PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0 },
1377 { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
1378 { PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0 },
1379 { PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0 },
1380 { PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0 },
1381 { PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0 },
1382 { PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0 },
1383 { PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0 },
1384 { PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0 },
1385 { PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0 },
1386 { PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0 },
1387 { PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0 },
1388 { PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0 },
1389 { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
1390 { PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0 },
1391 { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
1392 { PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0 },
1393 { PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0 },
1394 { PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0 },
1395 { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
1396 { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
1397 { PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0 },
1398 { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
1399 { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
1400 { PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0 },
1401 { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
1402 { PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0 },
1403 { PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0 },
1404 { PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0 },
1405 { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
1406 { PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0 },
1407 { PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0 },
1408 { PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0 },
1409 { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
1410 { PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0 },
1411 { PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0 },
1412 { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
1413 { PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0 },
1414 { PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0 },
1415 { PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0 },
1416 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
1417 { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
1418 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
1419 { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
1420 { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
1421 { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
1422 { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
1423 { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
1424 { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
1425 { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
1426 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
1427 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
1428 { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
1429 { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
1430 { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
1431 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
1432 { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
1433 { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
1434 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1435 { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
1436 { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
1437 { PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0 },
1438 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
1439 { PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0 },
1440 { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
1441 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
1442 { PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0 },
1443 { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
1444 { PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0 },
1445 { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
1446 { PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0 },
1447 { PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0 },
1448 { PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0 },
1449 { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
1450 { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
1451 { PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0 },
1452 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
1453 { PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0 },
1454 { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
1455 { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
1456 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
1457 { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
1458 { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
1459 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
1460 { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
1461 { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
1462 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
1463 { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
1464 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
1465 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
1466 { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
1467 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
1468 { PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0 },
1469 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
1470 { PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0 },
1471 { PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0 },
1472 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1473 { PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0 },
1474 { PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0 },
1475 { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
1476 { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
1477 { PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0 },
1478 { PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0 },
1479 { PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0 },
1480 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1481 { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
1482 { PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0 },
1483 { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
1484 { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
1485 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1486 { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
1487 { PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0 },
1488 { PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0 },
1489 { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
1490 { PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0 },
1491 { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
1492 { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
1493 { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
1494 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
1495 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
1496 { PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0 },
1497 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
1498 { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
1499 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
1500 { PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0 },
1501 { PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0 },
1502 { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
1503 { PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0 },
1504 { PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0 },
1505 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
1506 { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
1507 { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
1508 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
1509 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
1510 { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
1511 { PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0 },
1512 { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
1513 { PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0 },
1514 { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
1515 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
1516 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
1517 { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
1518 { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
1519 { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
1520 { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
1521 { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
1522 { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
1523 { PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0 },
1524 { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
1525 { PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0 },
1526 { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
1527 { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
1528 { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
1529 { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
1530 { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
1531 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
1532 { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
1533 { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
1534 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
1535 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
1536 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
1537 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
1538 { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
1539 { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
1540 { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
1541 { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
1542 { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
1543 { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
1544 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
1545 { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
1546 { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
1547 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
1548 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
1549 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
1550 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
1551 { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
1552 { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
1553 { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
1554 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1555 { PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0 },
1556 { PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0 },
1557 { PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0 },
1558 { PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0 },
1559 { PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0 },
1560 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1561 { PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0 },
1562 { PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0 },
1563 { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
1564 { PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0 },
1565 { PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0 },
1566 { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
1567 { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
1568 { PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0 },
1569 { PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0 },
1570 { PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0 },
1571 { PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0 },
1572 { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
1573 { PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0 },
1574 { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
1575 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
1576 { PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0 },
1577 { PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0 },
1578 { PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0 },
1579 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1580 { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
1581 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
1582 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
1583 { PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0 },
1584 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
1585 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
1586 { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
1587 { PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0 },
1588 { PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0 },
1589 { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
1590 { PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0 },
1591 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
1592 { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
1593 { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
1594 { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
1595 { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
1596 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
1597 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
1598 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
1599 { PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0 },
1600 { PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0 },
1601 { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
1602 { PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0 },
1603 { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
1604 { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
1605 { PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0 },
1606 { PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0 },
1607 { PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0 },
1608 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
1609 { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
1610 { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
1611 { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
1612 { PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0 },
1613 { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
1614 { PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0 },
1615 { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
1616 { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
1617 { PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0 },
1618 { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
1619 { PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0 },
1620 { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
1621 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
1622 { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
1623 { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
1624 { PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0 },
1625 { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
1626 { PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0 },
1627 { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
1628 { PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0 },
1629 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
1630 { PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0 },
1631 { PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0 },
1632 { PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0 },
1633 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
1634 { PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0 },
1635 { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
1636 { PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0 },
1637 { PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0 },
1638 { PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0 },
1639 { PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0 },
1640 { PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0 },
1641 { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
1642 { PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0 },
1643 { PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0 },
1644 { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
1645 { PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0 },
1646 { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
1647 { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
1648 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1649 { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
1650 { PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0 },
1651 { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
1652 { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
1653 { PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0 },
1654 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1655 { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
1656 { PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0 },
1657 { PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0 },
1658 { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
1659 { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
1660 { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
1661 { PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0 },
1662 { PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0 },
1663 { PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0 },
1664 { PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0 },
1665 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
1666 { PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0 },
1667 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
1668 { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
1669 { PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0 },
1670 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
1671 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
1672 { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
1673 { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
1674 { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
1675 { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
1676 { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
1677 { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
1678 { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
1679 { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
1680 { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
1681 { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
1682 { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
1683 { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
1684 { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
1685 { PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0 },
1686 { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
1687 { PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0 },
1688 { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
1689 { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
1690 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
1691 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
1692 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
1693 { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
1694 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1695 { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
1696 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
1697 { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
1698 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
1699 { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
1700 { PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0 },
1701 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
1702 { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
1703 { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
1704 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
1705 { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
1706 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
1707 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
1708 { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
1709 { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
1710 { PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0 },
1711 { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
1712 { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
1713 { PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0 },
1714 { PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0 },
1715 { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
1716 { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
1717 { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
1718 { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
1719 { PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0 },
1720 { PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0 },
1721 { PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0 },
1722 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1723 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
1724 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
1725 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1726 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
1727 { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
1728 { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
1729 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
1730 { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
1731 { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
1732 { PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0 },
1733 { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
1734 { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
1735 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
1736 { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
1737 { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
1738 { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
1739 { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
1740 { PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0 },
1741 { PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0 },
1742 { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
1743 { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
1744 { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
1745 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
1746 { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
1747 { PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0 },
1748 { PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0 },
1749 { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
1750 { PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0 },
1751 { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
1752 { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
1753 { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
1754 { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
1755 { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
1756 { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
1757 { PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0 },
1758 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
1759 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1760 { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
1761 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1762 { PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0 },
1763 { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
1764 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
1765 { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
1766 { PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0 },
1767 { PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0 },
1768 { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
1769 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
1770 { PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0 },
1771 { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
1772 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
1773 { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
1774 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
1775 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
1776 { PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0 },
1777 { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
1778 { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
1779 { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
1780 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
1781 { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
1782 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
1783 { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
1784 { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
1785 { PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0 },
1786 { PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0 },
1787 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
1788 { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
1789 { PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0 },
1790 { PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0 },
1791 { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
1792 { PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0 },
1793 { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
1794 { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
1795 { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
1796 { PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0 },
1797 { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
1798 { PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0 },
1799 { PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0 },
1800 { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
1801 { PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0 },
1802 { PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0 },
1803 { PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0 },
1804 { PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0 },
1805 { PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0 },
1806 { PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0 },
1807 { PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0 },
1808 { PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0 },
1809 { PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0 },
1810 { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
1811 { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
1812 { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
1813 { PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0 },
1814 { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
1815 { PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0 },
1816 { PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0 },
1817 { PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0 },
1818 { PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0 },
1819 { PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0 },
1820 { PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0 },
1821 { PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0 },
1822 { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
1823 { PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0 },
1824 { PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0 },
1825 { PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0 },
1826 { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
1827 { PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0 },
1828 { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
1829 { PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0 },
1830 { PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0 },
1831 { PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0 },
1832 { PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0 },
1833 { PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0 },
1834 { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
1835 { PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0 },
1836 { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
1837 { PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0 },
1838 { PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0 },
1839 { PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0 },
1840 { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
1841 { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
1842 { PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0 },
1843 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
1844 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
1845 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
1846 { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
1847 { PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0 },
1848 { PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0 },
1849 { PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0 },
1850 { PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0 },
1851 { PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0 },
1852 { PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0 },
1853 { PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0 },
1854 { PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0 },
1855 { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
1856 { PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0 },
1857 { PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0 },
1858 { PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0 },
1859 { PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0 },
1860 { PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0 },
1861 { PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0 },
1862 { PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0 },
1863 { PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0 },
1864 { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
1865 { PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0 },
1866 { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
1867 { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
1868 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
1869 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
1870 { PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0 },
1871 { PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0 },
1872 { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
1873 { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
1874 { PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0 },
1875 { PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0 },
1876 { PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0 },
1877 { PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0 },
1878 { PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0 },
1879 { PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0 },
1880 { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
1881 { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
1882 { PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0 },
1883 { PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0 },
1884 { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
1885 { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
1886 { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
1887 { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
1888 { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
1889 { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
1890 { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
1891 { PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0 },
1892 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
1893 { PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0 },
1894 { PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0 },
1895 { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
1896 { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
1897 { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
1898 { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
1899 { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
1900 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
1901 { PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0 },
1902 { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
1903 { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
1904 { PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0 },
1905 { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
1906 { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
1907 { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
1908 { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
1909 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
1910 { PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0 },
1911 { PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0 },
1912 { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
1913 { PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0 },
1914 { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
1915 { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
1916 { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
1917 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
1918 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
1919 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
1920 { PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0 },
1921 { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
1922 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
1923 { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
1924 { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
1925 { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
1926 { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
1927 { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
1928 { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
1929 { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
1930 { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
1931 { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
1932 { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
1933 { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
1934 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
1935 { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
1936 { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
1937 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
1938 { PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0 },
1939 { PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0 },
1940 { PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0 },
1941 { PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0 },
1942 { PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0 },
1943 { PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0 },
1944 { PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0 },
1945 { PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0 },
1946 { PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0 },
1947 { PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0 },
1948 { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
1949 { PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0 },
1950 { PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0 },
1951 { PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0 },
1952 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
1953 { PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0 },
1954 { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
1955 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
1956 { PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0 },
1957 { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
1958 { PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0 },
1959 { PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0 },
1960 { PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0 },
1961 { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
1962 { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
1963 { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
1964 { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
1965 { PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0 },
1966 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
1967 { PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0 },
1968 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
1969 { PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0 },
1970 { PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0 },
1971 { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
1972 { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
1973 { PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0 },
1974 { PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0 },
1975 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
1976 { PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0 },
1977 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
1978 { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
1979 { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
1980 { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
1981 { PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0 },
1982 { PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0 },
1983 { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
1984 { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
1985 { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
1986 { PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0 },
1987 { PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0 },
1988 { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
1989 { PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0 },
1990 { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
1991 { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
1992 { PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0 },
1993 { PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0 },
1994 { PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0 },
1995 { PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0 },
1996 { PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0 },
1997 { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
1998 { PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0 },
1999 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2000 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2001 { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
2002 { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
2003 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2004 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
2005 { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
2006 { PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0 },
2007 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2008 { PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0 },
2009 { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
2010 { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
2011 { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
2012 { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
2013 { PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0 },
2014 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2015 { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
2016 { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
2017 { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
2018 { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
2019 { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
2020 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2021 { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
2022 { PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0 },
2023 { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
2024 { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
2025 { PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0 },
2026 { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
2027 { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
2028 { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
2029 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2030 { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
2031 { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
2032 { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
2033 { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
2034 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2035 { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
2036 { PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0 },
2037 { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
2038 { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
2039 { PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0 },
2040 { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
2041 { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
2042 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
2043 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
2044 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2045 { PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0 },
2046 { PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0 },
2047 { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
2048 { PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0 },
2049 { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
2050 { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
2051 { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
2052 { PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0 },
2053 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
2054 { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
2055 { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
2056 { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
2057 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2058 { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
2059 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2060 { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
2061 { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
2062 { PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0 },
2063 { PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0 },
2064 { PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0 },
2065 { PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0 },
2066 { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
2067 { PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0 },
2068 { PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0 },
2069 { PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0 },
2070 { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
2071 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
2072 { PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0 },
2073 { PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0 },
2074 { PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0 },
2075 { PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0 },
2076 { PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0 },
2077 { PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0 },
2078 { PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0 },
2079 { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
2080 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
2081 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
2082 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
2083 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2084 { PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0 },
2085 { PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0 },
2086 { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
2087 { PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0 },
2088 { PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0 },
2089 { PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0 },
2090 { PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0 },
2091 { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
2092 { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
2093 { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
2094 { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
2095 { PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0 },
2096 { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
2097 { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
2098 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
2099 { PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0 },
2100 { PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0 },
2101 { PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0 },
2102 { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
2103 { PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0 },
2104 { PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0 },
2105 { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
2106 { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
2107 { PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0 },
2108 { PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0 },
2109 { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
2110 { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
2111 { PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0 },
2112 { PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0 },
2113 { PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0 },
2114 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
2115 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
2116 { PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0 },
2117 { PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0 },
2118 { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
2119 { PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0 },
2120 { PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0 },
2121 { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
2122 { PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0 },
2123 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
2124 { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
2125 { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
2126 { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
2127 { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
2128 { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
2129 { PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0 },
2130 { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
2131 { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
2132 { PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0 },
2133 { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
2134 { PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0 },
2135 { PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0 },
2136 { PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0 },
2137 { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
2138 { PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0 },
2139 { PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0 },
2140 { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
2141 { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
2142 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
2143 { PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0 },
2144 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
2145 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
2146 { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
2147 { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
2148 { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
2149 { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
2150 { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
2151 { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
2152 { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
2153 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2154 { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
2155 { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
2156 { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
2157 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2158 { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
2159 { PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0 },
2160 { PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0 },
2161 { PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0 },
2162 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
2163 { PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0 },
2164 { PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0 },
2165 { PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0 },
2166 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
2167 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2168 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2169 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2170 { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
2171 { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
2172 { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
2173 { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
2174 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2175 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2176 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2177 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2178 { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
2179 { PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0 },
2180 { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
2181 { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
2182 { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
2183 { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
2184 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2185 { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
2186 { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
2187 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2188 { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
2189 { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
2190 { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
2191 { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
2192 { PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0 },
2193 { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
2194 { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
2195 { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
2196 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2197 { PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0 },
2198 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
2199 { PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0 },
2200 { PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0 },
2201 { PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0 },
2202 { PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0 },
2203 { PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0 },
2204 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2205 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2206 { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
2207 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2208 { PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0 },
2209 { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
2210 { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
2211 { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
2212 { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
2213 { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
2214 { PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0 },
2215 { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
2216 { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
2217 { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
2218 { PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0 },
2219 { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
2220 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2221 { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
2222 { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
2223 { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
2224 { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
2225 { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
2226 { PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0 },
2227 { PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0 },
2228 { PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0 },
2229 { PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0 },
2230 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2231 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2232 { PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0 },
2233 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2234 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2235 { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
2236 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2237 { PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0 },
2238 { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
2239 { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
2240 { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
2241 { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
2242 { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
2243 { PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0 },
2244 { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
2245 { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
2246 { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
2247 { PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0 },
2248 { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
2249 { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
2250 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2251 { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
2252 { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
2253 { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
2254 { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
2255 { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
2256 { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
2257 { PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0 },
2258 { PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0 },
2259 { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
2260 { PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0 },
2261 { PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0 },
2262 { PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0 },
2263 { PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0 },
2264 { PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0 },
2265 { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
2266 { PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0 },
2267 { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
2268 { PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0 },
2269 { PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0 },
2270 { PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0 },
2271 { PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0 },
2272 { PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0 },
2273 { PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0 },
2274 { PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0 },
2275 { PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0 },
2276 { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
2277 { PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0 },
2278 { PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0 },
2279 { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
2280 { PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0 },
2281 { PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0 },
2282 { PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0 },
2283 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2284 { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
2285 { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
2286 { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
2287 { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
2288 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
2289 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
2290 { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
2291 { PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0 },
2292 { PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0 },
2293 { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
2294 { PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0 },
2295 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
2296 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
2297 { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
2298 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
2299 { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
2300 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
2301 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
2302 { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
2303 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2304 { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
2305 { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
2306 { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
2307 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2308 { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
2309 { PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0 },
2310 { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
2311 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2312 { PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0 },
2313 { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
2314 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2315 { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
2316 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2317 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2318 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2319 { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
2320 { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
2321 { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
2322 { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
2323 { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
2324 { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
2325 { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
2326 { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
2327 { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
2328 { PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0 },
2329 { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
2330 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2331 { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
2332 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2333 { PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0 },
2334 { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
2335 { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
2336 { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
2337 { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
2338 { PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0 },
2339 { PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0 },
2340 { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
2341 { PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0 },
2342 { PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0 },
2343 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2344 { PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0 },
2345 { PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0 },
2346 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2347 { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
2348 { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
2349 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2350 { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
2351 { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
2352 { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
2353 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2354 { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
2355 { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
2356 { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
2357 { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
2358 { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
2359 { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
2360 { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
2361 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
2362 { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
2363 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
2364 { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
2365 { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
2366 { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
2367 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
2368 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
2369 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2370 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
2371 { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
2372 { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
2373 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
2374 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
2375 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
2376 { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
2377 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2378 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2379 { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
2380 { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
2381 { PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0 },
2382 { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
2383 { PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0 },
2384 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2385 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2386 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2387 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2388 { PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0 },
2389 { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
2390 { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
2391 { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
2392 { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
2393 { PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0 },
2394 { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
2395 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
2396 { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
2397 { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
2398 { PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0 },
2399 { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
2400 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
2401 { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
2402 { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
2403 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2404 { PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0 },
2405 { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
2406 { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
2407 { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
2408 { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
2409 { PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0 },
2410 { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
2411 { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
2412 { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
2413 { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
2414 { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
2415 { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
2416 { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
2417 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2418 { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
2419 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2420 { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
2421 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2422 { PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0 },
2423 { PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0 },
2424 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2425 { PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0 },
2426 { PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0 },
2427 { PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0 },
2428 { PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0 },
2429 { PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0 },
2430 { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
2431 { PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0 },
2432 { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
2433 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2434 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2435 { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
2436 { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
2437 { PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0 },
2438 { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
2439 { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
2440 { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
2441 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2442 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2443 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2444 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2445 { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
2446 { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
2447 { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
2448 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
2449 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
2450 { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
2451 { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
2452 { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
2453 { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
2454 { PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0 },
2455 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
2456 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
2457 { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
2458 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
2459 { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
2460 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
2461 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
2462 { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
2463 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2464 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2465 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2466 { PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0 },
2467 { PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0 },
2468 { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
2469 { PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0 },
2470 { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
2471 { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
2472 { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
2473 { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
2474 { PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0 },
2475 { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
2476 { PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0 },
2477 { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
2478 { PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0 },
2479 { PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0 },
2480 { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
2481 { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
2482 { PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0 },
2483 { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
2484 { PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0 },
2485 { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
2486 { PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0 },
2487 { PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0 },
2488 { PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0 },
2489 { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
2490 { PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0 },
2491 { PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0 },
2492 { PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0 },
2493 { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
2494 { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
2495 { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
2496 { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
2497 { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
2498 { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
2499 { PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0 },
2500 { PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0 },
2501 { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
2502 { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
2503 { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
2504 { PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0 },
2505 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2506 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2507 { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
2508 { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
2509 { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
2510 { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
2511 { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
2512 { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
2513 { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
2514 { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
2515 { PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0 },
2516 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2517 { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
2518 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2519 { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
2520 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2521 { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
2522 { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
2523 { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
2524 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2525 { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
2526 { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
2527 { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
2528 { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
2529 { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
2530 { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
2531 { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
2532 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
2533 { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
2534 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
2535 { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
2536 { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
2537 { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
2538 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
2539 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
2540 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2541 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
2542 { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
2543 { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
2544 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
2545 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
2546 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
2547 { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
2548 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2549 { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
2550 { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
2551 { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
2552 { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
2553 { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
2554 { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
2555 { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
2556 { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
2557 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2558 { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
2559 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2560 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2561 { PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0 },
2562 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
2563 { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
2564 { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
2565 { PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0 },
2566 { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
2567 { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
2568 { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
2569 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2570 { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
2571 { PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0 },
2572 { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
2573 { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
2574 { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
2575 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
2576 { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
2577 { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
2578 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2579 { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
2580 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2581 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2582 { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
2583 { PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0 },
2584 { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
2585 { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
2586 { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
2587 { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
2588 { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
2589 { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
2590 { PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0 },
2591 { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
2592 { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
2593 { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
2594 { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
2595 { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
2596 { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
2597 { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
2598 { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
2599 { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
2600 { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
2601 { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
2602 { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
2603 { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
2604 { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
2605 { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
2606 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2607 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
2608 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2609 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2610 { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
2611 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
2612 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
2613 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
2614 { PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0 },
2615 { PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0 },
2616 { PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0 },
2617 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
2618 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
2619 { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
2620 { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
2621 { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
2622 { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
2623 { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
2624 { PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0 },
2625 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2626 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2627 { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
2628 { PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0 },
2629 { PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0 },
2630 { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
2631 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2632 { PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0 },
2633 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2634 { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
2635 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
2636 { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
2637 { PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0 },
2638 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2639 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
2640 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
2641 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2642 { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
2643 { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
2644 { PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0 },
2645 { PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0 },
2646 { PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0 },
2647 { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
2648 { PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0 },
2649 { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
2650 { PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0 },
2651 { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
2652 { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
2653 { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
2654 { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
2655 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2656 { PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0 },
2657 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2658 { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
2659 { PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0 },
2660 { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
2661 { PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0 },
2662 { PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0 },
2663 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
2664 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
2665 { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
2666 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2667 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
2668 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
2669 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2670 { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
2671 { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
2672 { PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0 },
2673 { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
2674 { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
2675 { PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0 },
2676 { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
2677 { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
2678 { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
2679 { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
2680 { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
2681 { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
2682 { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
2683 { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
2684 { PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0 },
2685 { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
2686 { PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0 },
2687 { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
2688 { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
2689 { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
2690 { PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0 },
2691 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
2692 { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
2693 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
2694 { PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0 },
2695 { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
2696 { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
2697 { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
2698 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2699 { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
2700 { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
2701 { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
2702 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
2703 { PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0 },
2704 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2705 { PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0 },
2706 { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
2707 { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
2708 { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
2709 { PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0 },
2710 { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
2711 { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
2712 { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
2713 { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
2714 { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
2715 { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
2716 { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
2717 { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
2718 { PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0 },
2719 { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
2720 { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
2721 { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
2722 { PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0 },
2723 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
2724 { PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0 },
2725 { PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0 },
2726 { PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0 },
2727 { PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0 },
2728 { PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0 },
2729 { PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0 },
2730 { PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0 },
2731 { PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0 },
2732 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2733 { PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0 },
2734 { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
2735 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2736 { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
2737 { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
2738 { PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0 },
2739 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2740 { PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0 },
2741 { PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0 },
2742 { PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0 },
2743 { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
2744 { PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0 },
2745 { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
2746 { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
2747 { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
2748 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
2749 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2750 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
2751 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
2752 { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
2753 { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
2754 { PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0 },
2755 { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
2756 { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
2757 { PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0 },
2758 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2759 { PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0 },
2760 { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
2761 { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
2762 { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
2763 { PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0 },
2764 { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
2765 { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
2766 { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
2767 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2768 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2769 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
2770 { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
2771 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2772 { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
2773 { PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0 },
2774 { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
2775 { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
2776 { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
2777 { PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0 },
2778 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
2779 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2780 { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
2781 { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
2782 { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
2783 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2784 { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
2785 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
2786 { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
2787 { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
2788 { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
2789 { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
2790 { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
2791 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2792 { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
2793 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
2794 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2795 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
2796 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
2797 { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
2798 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2799 { PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0 },
2800 { PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0 },
2801 { PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0 },
2802 { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
2803 { PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0 },
2804 { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
2805 { PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0 },
2806 { PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0 },
2807 { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
2808 { PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0 },
2809 { PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0 },
2810 { PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0 },
2811 { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
2812 { PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0 },
2813 { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
2814 { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
2815 { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
2816 { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
2817 { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
2818 { PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0 },
2819 { PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0 },
2820 { PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0 },
2821 { PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0 },
2822 { PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0 },
2823 { PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0 },
2824 { PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0 },
2825 { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
2826 { PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0 },
2827 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
2828 { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
2829 { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
2830 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
2831 { PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0 },
2832 { PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0 },
2833 { PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0 },
2834 { PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0 },
2835 { PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0 },
2836 { PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0 },
2837 { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
2838 { PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0 },
2839 { PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0 },
2840 { PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0 },
2841 { PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0 },
2842 { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
2843 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
2844 { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
2845 { PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0 },
2846 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2847 { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
2848 { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
2849 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2850 { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
2851 { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
2852 { PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0 },
2853 { PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0 },
2854 { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
2855 { PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0 },
2856 { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
2857 { PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0 },
2858 { PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0 },
2859 { PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0 },
2860 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2861 { PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0 },
2862 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
2863 { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
2864 { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
2865 { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
2866 { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
2867 { PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0 },
2868 { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
2869 { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
2870 { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
2871 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2872 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2873 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2874 { PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0 },
2875 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
2876 { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
2877 { PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0 },
2878 { PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0 },
2879 { PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0 },
2880 { PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0 },
2881 { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
2882 { PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0 },
2883 { PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0 },
2884 { PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0 },
2885 { PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0 },
2886 { PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0 },
2887 { PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0 },
2888 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
2889 { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
2890 { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
2891 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
2892 { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
2893 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2894 { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
2895 { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
2896 { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
2897 { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
2898 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2899 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2900 { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
2901 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
2902 { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
2903 { PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0 },
2904 { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
2905 { PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0 },
2906 { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
2907 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2908 { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
2909 { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
2910 { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
2911 { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
2912 { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
2913 { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
2914 { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
2915 { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
2916 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
2917 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
2918 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
2919 { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
2920 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
2921 { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
2922 { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
2923 { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
2924 { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
2925 { PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0 },
2926 { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
2927 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2928 { PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0 },
2929 { PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0 },
2930 { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
2931 { PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0 },
2932 { PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0 },
2933 { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
2934 { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
2935 { PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0 },
2936 { PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0 },
2937 { PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0 },
2938 { PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0 },
2939 { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
2940 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
2941 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
2942 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2943 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2944 { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
2945 { PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0 },
2946 { PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0 },
2947 { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
2948 { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
2949 { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
2950 { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
2951 { PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0 },
2952 { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
2953 { PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0 },
2954 { PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0 },
2955 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2956 { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
2957 { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
2958 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
2959 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
2960 { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
2961 { PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0 },
2962 { PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0 },
2963 { PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0 },
2964 { PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0 },
2965 { PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0 },
2966 { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
2967 { PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0 },
2968 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
2969 { PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0 },
2970 { PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0 },
2971 { PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0 },
2972 { PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0 },
2973 { PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0 },
2974 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
2975 { PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0 },
2976 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
2977 { PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0 },
2978 { PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0 },
2979 { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
2980 { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
2981 { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
2982 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
2983 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
2984 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
2985 { PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0 },
2986 { PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0 },
2987 { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
2988 { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
2989 { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
2990 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
2991 { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
2992 { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
2993 { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
2994 { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
2995 { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
2996 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
2997 { PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0 },
2998 { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
2999 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
3000 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3001 { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
3002 { PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0 },
3003 { PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0 },
3004 { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
3005 { PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0 },
3006 { PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0 },
3007 { PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0 },
3008 { PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0 },
3009 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
3010 { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
3011 { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
3012 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
3013 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3014 { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
3015 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
3016 { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
3017 { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
3018 { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
3019 { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
3020 { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
3021 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
3022 { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
3023 { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
3024 { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
3025 { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
3026 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3027 { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
3028 { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
3029 { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
3030 { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
3031 { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
3032 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
3033 { PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0 },
3034 { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
3035 { PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0 },
3036 { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
3037 { PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0 },
3038 { PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0 },
3039 { PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0 },
3040 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
3041 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3042 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3043 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
3044 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
3045 { PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0 },
3046 { PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0 },
3047 { PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0 },
3048 { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
3049 { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
3050 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3051 { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
3052 { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
3053 { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
3054 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3055 { PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0 },
3056 { PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0 },
3057 { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
3058 { PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0 },
3059 { PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0 },
3060 { PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0 },
3061 { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
3062 { PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0 },
3063 { PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0 },
3064 { PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0 },
3065 { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
3066 { PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0 },
3067 { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
3068 { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
3069 { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
3070 { PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0 },
3071 { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
3072 { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
3073 { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
3074 { PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0 },
3075 { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
3076 { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
3077 { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
3078 { PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0 },
3079 { PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0 },
3080 { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
3081 { PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0 },
3082 { PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0 },
3083 { PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0 },
3084 { PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0 },
3085 { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
3086 { PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0 },
3087 { PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0 },
3088 { PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0 },
3089 { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
3090 { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
3091 { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
3092 { PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0 },
3093 { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
3094 { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
3095 { PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0 },
3096 { PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0 },
3097 { PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0 },
3098 { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
3099 { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
3100 { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
3101 { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
3102 { PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0 },
3103 { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
3104 { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
3105 { PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0 },
3106 { PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0 },
3107 { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
3108 { PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0 },
3109 { PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0 },
3110 { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
3111 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
3112 { PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0 },
3113 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
3114 { PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0 },
3115 { PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0 },
3116 { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
3117 { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
3118 { PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0 },
3119 { PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0 },
3120 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
3121 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3122 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3123 { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
3124 { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
3125 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
3126 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3127 { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
3128 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
3129 { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
3130 { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
3131 { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
3132 { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
3133 { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
3134 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
3135 { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
3136 { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
3137 { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
3138 { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
3139 { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
3140 { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
3141 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
3142 { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
3143 { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
3144 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3145 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3146 { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
3147 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
3148 { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
3149 { PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0 },
3150 { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
3151 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3152 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3153 { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
3154 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
3155 { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
3156 { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
3157 { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
3158 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3159 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3160 { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
3161 { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
3162 { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
3163 { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
3164 { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
3165 { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
3166 { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
3167 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
3168 { PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0 },
3169 { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
3170 { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
3171 { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
3172 { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
3173 { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
3174 { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
3175 { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
3176 { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
3177 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3178 { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
3179 { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
3180 { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
3181 { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
3182 { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
3183 { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
3184 { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
3185 { PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0 },
3186 { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
3187 { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
3188 { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
3189 { PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0 },
3190 { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
3191 { PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0 },
3192 { PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0 },
3193 { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
3194 { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
3195 { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
3196 { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
3197 { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
3198 { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
3199 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3200 { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
3201 { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
3202 { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
3203 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3204 { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
3205 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3206 { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
3207 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
3208 { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
3209 { PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0 },
3210 { PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0 },
3211 { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
3212 { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
3213 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
3214 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3215 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3216 { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
3217 { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
3218 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
3219 { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
3220 { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
3221 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
3222 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
3223 { PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0 },
3224 { PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0 },
3225 { PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0 },
3226 { PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0 },
3227 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
3228 { PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0 },
3229 { PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0 },
3230 { PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0 },
3231 { PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0 },
3232 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
3233 { PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0 },
3234 { PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0 },
3235 { PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0 },
3236 { PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0 },
3237 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
3238 { PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0 },
3239 { PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0 },
3240 { PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0 },
3241 { PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0 },
3242 { PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0 },
3243 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3244 { PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0 },
3245 { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
3246 { PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0 },
3247 { PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0 },
3248 { PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0 },
3249 { PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0 },
3250 { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
3251 { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
3252 { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
3253 { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
3254 { PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0 },
3255 { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
3256 { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
3257 { PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0 },
3258 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
3259 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
3260 { PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0 },
3261 { PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0 },
3262 { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
3263 { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
3264 { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
3265 { PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0 },
3266 { PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0 },
3267 { PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0 },
3268 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
3269 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
3270 { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
3271 { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
3272 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3273 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3274 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3275 { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
3276 { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
3277 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3278 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3279 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3280 { PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0 },
3281 { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
3282 { PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0 },
3283 { PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0 },
3284 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
3285 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
3286 { PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0 },
3287 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
3288 { PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0 },
3289 { PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0 },
3290 { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
3291 { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
3292 { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
3293 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3294 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3295 { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
3296 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
3297 { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
3298 { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
3299 { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
3300 { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
3301 { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
3302 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
3303 { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
3304 { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
3305 { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
3306 { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
3307 { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
3308 { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
3309 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
3310 { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
3311 { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
3312 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3313 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3314 { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
3315 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
3316 { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
3317 { PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0 },
3318 { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
3319 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3320 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3321 { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
3322 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
3323 { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
3324 { PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0 },
3325 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3326 { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
3327 { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
3328 { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
3329 { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
3330 { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
3331 { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
3332 { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
3333 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
3334 { PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0 },
3335 { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
3336 { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
3337 { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
3338 { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
3339 { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
3340 { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
3341 { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
3342 { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
3343 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3344 { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
3345 { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
3346 { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
3347 { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
3348 { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
3349 { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
3350 { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
3351 { PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0 },
3352 { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
3353 { PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0 },
3354 { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
3355 { PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0 },
3356 { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
3357 { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
3358 { PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0 },
3359 { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
3360 { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
3361 { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
3362 { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
3363 { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
3364 { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
3365 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3366 { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
3367 { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
3368 { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
3369 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
3370 { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
3371 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3372 { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
3373 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
3374 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
3375 { PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0 },
3376 { PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0 },
3377 { PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0 },
3378 { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
3379 { PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0 },
3380 { PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0 },
3381 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
3382 { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
3383 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
3384 { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
3385 { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
3386 { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
3387 { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
3388 { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
3389 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
3390 { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
3391 { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
3392 { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
3393 { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
3394 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3395 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
3396 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3397 { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
3398 { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
3399 { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
3400 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3401 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
3402 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3403 { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
3404 { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
3405 { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
3406 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3407 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
3408 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3409 { PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0 },
3410 { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
3411 { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
3412 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3413 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
3414 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3415 { PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0 },
3416 { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
3417 { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
3418 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3419 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
3420 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3421 { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
3422 { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
3423 { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
3424 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3425 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
3426 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3427 { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
3428 { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
3429 { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
3430 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3431 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
3432 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
3433 { PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0 },
3434 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3435 { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
3436 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
3437 { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
3438 { PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0 },
3439 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
3440 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
3441 { PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0 },
3442 { PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0 },
3443 { PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0 },
3444 { PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0 },
3445 { PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0 },
3446 { PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0 },
3447 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
3448 { PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0 },
3449 { PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0 },
3450 { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
3451 { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
3452 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3453 { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
3454 { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
3455 { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
3456 { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
3457 { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
3458 { PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0 },
3459 { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
3460 { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
3461 { PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0 },
3462 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
3463 { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
3464 { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
3465 { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
3466 { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
3467 { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
3468 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
3469 { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
3470 { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
3471 { PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0 },
3472 { PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0 },
3473 { PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0 },
3474 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3475 { PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0 },
3476 { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
3477 { PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0 },
3478 { PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0 },
3479 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3480 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3481 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3482 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3483 { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
3484 { PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0 },
3485 { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
3486 { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
3487 { PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0 },
3488 { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
3489 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
3490 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3491 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3492 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3493 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3494 { PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0 },
3495 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
3496 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3497 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3498 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3499 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3500 { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
3501 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
3502 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3503 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3504 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3505 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3506 { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
3507 { PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0 },
3508 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
3509 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3510 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3511 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3512 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3513 { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
3514 { PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0 },
3515 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
3516 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3517 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3518 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3519 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3520 { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
3521 { PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0 },
3522 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
3523 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
3524 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3525 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3526 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3527 { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
3528 { PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0 },
3529 { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
3530 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3531 { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
3532 { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
3533 { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
3534 { PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0 },
3535 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3536 { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
3537 { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
3538 { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
3539 { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
3540 { PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0 },
3541 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
3542 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
3543 { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
3544 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
3545 { PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0 },
3546 { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
3547 { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
3548 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
3549 { PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0 },
3550 { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
3551 { PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0 },
3552 { PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0 },
3553 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
3554 { PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0 },
3555 { PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0 },
3556 { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
3557 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3558 { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
3559 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
3560 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
3561 { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
3562 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3563 { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
3564 { PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0 },
3565 { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
3566 { PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0 },
3567 { PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0 },
3568 { PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0 },
3569 { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
3570 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3571 { PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0 },
3572 { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
3573 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
3574 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3575 { PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0 },
3576 { PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0 },
3577 { PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0 },
3578 { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
3579 { PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0 },
3580 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
3581 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
3582 { PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0 },
3583 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
3584 { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
3585 { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
3586 { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
3587 { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
3588 { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
3589 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
3590 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
3591 { PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0 },
3592 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
3593 { PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0 },
3594 { PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0 },
3595 { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
3596 { PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0 },
3597 { PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0 },
3598 { PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0 },
3599 { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
3600 { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
3601 { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
3602 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3603 { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
3604 { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
3605 { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
3606 { PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0 },
3607 { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
3608 { PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0 },
3609 { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
3610 { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
3611 { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
3612 { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
3613 { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
3614 { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
3615 { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
3616 { PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0 },
3617 { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
3618 { PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0 },
3619 { PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0 },
3620 { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
3621 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3622 { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
3623 { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
3624 { PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0 },
3625 { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
3626 { PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0 },
3627 { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
3628 { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
3629 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3630 { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
3631 { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
3632 { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
3633 { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
3634 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
3635 { PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0 },
3636 { PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0 },
3637 { PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0 },
3638 { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
3639 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
3640 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
3641 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
3642 { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
3643 { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
3644 { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
3645 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
3646 { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
3647 { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
3648 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
3649 { PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0 },
3650 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
3651 { PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0 },
3652 { PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0 },
3653 { PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0 },
3654 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
3655 { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
3656 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
3657 { PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0 },
3658 { PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0 },
3659 { PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0 },
3660 { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
3661 { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
3662 { PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0 },
3663 { PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0 },
3664 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
3665 { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
3666 { PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0 },
3667 { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
3668 { PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0 },
3669 { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
3670 { PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0 },
3671 { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
3672 { PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0 },
3673 { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
3674 { PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0 },
3675 { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
3676 { PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0 },
3677 { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
3678 { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
3679 { PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0 },
3680 { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
3681 { PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0 },
3682 { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
3683 { PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0 },
3684 { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
3685 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
3686 { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
3687 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
3688 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
3689 { PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0 },
3690 { PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0 },
3691 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
3692 { PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0 },
3693 { PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0 },
3694 { PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0 },
3695 { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
3696 { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
3697 { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
3698 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3699 { PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0 },
3700 { PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0 },
3701 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
3702 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
3703 { PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0 },
3704 { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
3705 { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
3706 { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
3707 { PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0 },
3708 { PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0 },
3709 { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
3710 { PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0 },
3711 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3712 { PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0 },
3713 { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
3714 { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
3715 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3716 { PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0 },
3717 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
3718 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
3719 { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
3720 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
3721 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
3722 { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
3723 { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
3724 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3725 { PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0 },
3726 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
3727 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
3728 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
3729 { PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0 },
3730 { PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0 },
3731 { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
3732 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
3733 { PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0 },
3734 { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
3735 { PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0 },
3736 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
3737 { PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0 },
3738 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
3739 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
3740 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
3741 { PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0 },
3742 { PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0 },
3743 { PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0 },
3744 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
3745 { PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0 },
3746 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
3747 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
3748 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
3749 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
3750 { PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0 },
3751 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
3752 { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
3753 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
3754 { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
3755 { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
3756 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
3757 { PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0 },
3758 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
3759 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
3760 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
3761 { PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0 },
3762 { PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0 },
3763 { PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0 },
3764 { PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0 },
3765 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
3766 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
3767 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
3768 { PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0 },
3769 { PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0 },
3770 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
3771 { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
3772 { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
3773 { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
3774 { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
3775 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
3776 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
3777 { PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0 },
3778 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
3779 { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
3780 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
3781 { PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0 },
3782 { PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0 },
3783 { PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0 },
3784 { PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0 },
3785 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
3786 { PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0 },
3787 { PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0 },
3788 { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
3789 { PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0 },
3790 { PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0 },
3791 { PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0 },
3792 { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
3793 { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
3794 { PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0 },
3795 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
3796 { PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0 },
3797 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
3798 { PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0 },
3799 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
3800 { PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0 },
3801 { PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0 },
3802 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
3803 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
3804 { PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0 },
3805 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
3806 { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
3807 { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
3808 { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
3809 { PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0 },
3810 { PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0 },
3811 { PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0 },
3812 { PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0 },
3813 { PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0 },
3814 { PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0 },
3815 { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
3816 { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
3817 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
3818 { PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0 },
3819 { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
3820 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
3821 { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
3822 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
3823 { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
3824 { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
3825 { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
3826 { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
3827 { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
3828 { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
3829 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
3830 { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
3831 { PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0 },
3832 { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
3833 { PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0 },
3834 { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
3835 { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
3836 { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
3837 { PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0 },
3838 { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
3839 { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
3840 { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
3841 { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
3842 { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
3843 { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
3844 { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
3845 { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
3846 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
3847 { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
3848 { PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0 },
3849 { PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0 },
3850 { PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0 },
3851 { PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0 },
3852 { PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0 },
3853 { PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0 },
3854 { PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0 },
3855 { PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0 },
3856 { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
3857 { PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0 },
3858 { PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0 },
3859 { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
3860 { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
3861 { PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0 },
3862 { PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0 },
3863 { PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0 },
3864 { PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0 },
3865 { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
3866 { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
3867 { PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0 },
3868 { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
3869 { PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0 },
3870 { PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0 },
3871 { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
3872 { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
3873 { PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0 },
3874 { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
3875 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
3876 { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
3877 { PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0 },
3878 { PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0 },
3879 { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
3880 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
3881 { PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0 },
3882 { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
3883 { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
3884 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
3885 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
3886 { PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0 },
3887 { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
3888 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
3889 { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
3890 { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
3891 { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
3892 { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
3893 { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
3894 { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
3895 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
3896 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
3897 { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
3898 { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
3899 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
3900 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
3901 { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
3902 { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
3903 { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
3904 { PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0 },
3905 { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
3906 { PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0 },
3907 { PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0 },
3908 { PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0 },
3909 { PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0 },
3910 { PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0 },
3911 { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
3912 { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
3913 { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
3914 { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
3915 { PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0 },
3916 { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
3917 { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
3918 { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
3919 { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
3920 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
3921 { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
3922 { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
3923 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
3924 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
3925 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
3926 { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
3927 { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
3928 { PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0 },
3929 { PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0 },
3930 { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
3931 { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
3932 { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
3933 { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
3934 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
3935 { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
3936 { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
3937 { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
3938 { PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0 },
3939 { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
3940 { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
3941 { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
3942 { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
3943 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
3944 { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
3945 { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
3946 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
3947 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
3948 { PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0 },
3949 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
3950 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
3951 { PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0 },
3952 { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
3953 { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
3954 { PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0 },
3955 { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
3956 { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
3957 { PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0 },
3958 { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
3959 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
3960 { PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0 },
3961 { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
3962 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
3963 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
3964 { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
3965 { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
3966 { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
3967 { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
3968 { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
3969 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
3970 { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
3971 { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
3972 { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
3973 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
3974 { PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0 },
3975 { PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0 },
3976 { PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0 },
3977 { PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0 },
3978 { PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0 },
3979 { PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0 },
3980 { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
3981 { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
3982 { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
3983 { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
3984 { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
3985 { PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0 },
3986 { PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0 },
3987 { PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0 },
3988 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
3989 { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
3990 { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
3991 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
3992 { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
3993 { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
3994 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
3995 { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
3996 { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
3997 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
3998 { PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0 },
3999 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
4000 { PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0 },
4001 { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
4002 { PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0 },
4003 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
4004 { PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0 },
4005 { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
4006 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
4007 { PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0 },
4008 { PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0 },
4009 { PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0 },
4010 { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
4011 { PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0 },
4012 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
4013 { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
4014 { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
4015 { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
4016 { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
4017 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4018 { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
4019 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
4020 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4021 { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
4022 { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
4023 { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
4024 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
4025 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
4026 { PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0 },
4027 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
4028 { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
4029 { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
4030 { PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0 },
4031 { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
4032 { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
4033 { PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0 },
4034 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
4035 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
4036 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
4037 { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
4038 { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
4039 { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
4040 { PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0 },
4041 { PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0 },
4042 { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
4043 { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
4044 { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
4045 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
4046 { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
4047 { PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0 },
4048 { PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0 },
4049 { PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0 },
4050 { PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0 },
4051 { PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0 },
4052 { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
4053 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
4054 { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
4055 { PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0 },
4056 { PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0 },
4057 { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
4058 { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
4059 { PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0 },
4060 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
4061 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
4062 { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
4063 { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
4064 { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
4065 { PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0 },
4066 { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
4067 { PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0 },
4068 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
4069 { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
4070 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
4071 { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
4072 { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
4073 { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
4074 { PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0 },
4075 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
4076 { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
4077 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4078 { PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0 },
4079 { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
4080 { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
4081 { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
4082 { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
4083 { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
4084 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
4085 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
4086 { PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0 },
4087 { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
4088 { PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0 },
4089 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
4090 { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
4091 { PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0 },
4092 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
4093 { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
4094 { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
4095 { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
4096 { PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0 },
4097 { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
4098 { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
4099 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
4100 { PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0 },
4101 { PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0 },
4102 { PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0 },
4103 { PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0 },
4104 { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
4105 { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
4106 { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
4107 { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
4108 { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
4109 { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
4110 { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
4111 { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
4112 { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
4113 { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
4114 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
4115 { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
4116 { PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0 },
4117 { PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0 },
4118 { PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0 },
4119 { PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0 },
4120 { PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0 },
4121 { PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0 },
4122 { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
4123 { PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0 },
4124 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
4125 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4126 { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
4127 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4128 { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
4129 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
4130 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
4131 { PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0 },
4132 { PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0 },
4133 { PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0 },
4134 { PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0 },
4135 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
4136 { PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0 },
4137 { PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0 },
4138 { PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0 },
4139 { PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0 },
4140 { PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0 },
4141 { PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0 },
4142 { PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0 },
4143 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
4144 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
4145 { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
4146 { PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0 },
4147 { PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0 },
4148 { PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0 },
4149 { PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0 },
4150 { PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0 },
4151 { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
4152 { PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0 },
4153 { PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0 },
4154 { PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0 },
4155 { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
4156 { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
4157 { PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0 },
4158 { PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0 },
4159 { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
4160 { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
4161 { PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0 },
4162 { PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0 },
4163 { PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0 },
4164 { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
4165 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
4166 { PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0 },
4167 { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
4168 { PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0 },
4169 { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
4170 { PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0 },
4171 { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
4172 { PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0 },
4173 { PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0 },
4174 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
4175 { PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0 },
4176 { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
4177 { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
4178 { PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0 },
4179 { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
4180 { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
4181 { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
4182 { PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0 },
4183 { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
4184 { PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0 },
4185 { PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0 },
4186 { PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0 },
4187 { PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0 },
4188 { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
4189 { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
4190 { PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0 },
4191 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4192 { PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0 },
4193 { PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0 },
4194 { PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0 },
4195 { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
4196 { PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0 },
4197 { PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0 },
4198 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
4199 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4200 { PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0 },
4201 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
4202 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
4203 { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
4204 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4205 { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
4206 { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
4207 { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
4208 { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
4209 { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
4210 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
4211 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
4212 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
4213 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4214 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
4215 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
4216 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
4217 { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
4218 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
4219 { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
4220 { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
4221 { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
4222 { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
4223 { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
4224 { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
4225 { PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0 },
4226 { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
4227 { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
4228 { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
4229 { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
4230 { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
4231 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
4232 { PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0 },
4233 { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
4234 { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
4235 { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
4236 { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
4237 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
4238 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
4239 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4240 { PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0 },
4241 { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
4242 { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
4243 { PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0 },
4244 { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
4245 { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
4246 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
4247 { PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0 },
4248 { PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0 },
4249 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
4250 { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
4251 { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
4252 { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
4253 { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
4254 { PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0 },
4255 { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
4256 { PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0 },
4257 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
4258 { PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0 },
4259 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
4260 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
4261 { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
4262 { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
4263 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
4264 { PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0 },
4265 { PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0 },
4266 { PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0 },
4267 { PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0 },
4268 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
4269 { PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0 },
4270 { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
4271 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
4272 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
4273 { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
4274 { PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0 },
4275 { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
4276 { PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0 },
4277 { PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0 },
4278 { PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0 },
4279 { PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0 },
4280 { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
4281 { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
4282 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
4283 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
4284 { PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0 },
4285 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
4286 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
4287 { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
4288 { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
4289 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
4290 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
4291 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
4292 { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
4293 { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
4294 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
4295 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4296 { PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0 },
4297 { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
4298 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
4299 { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
4300 { PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0 },
4301 { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
4302 { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
4303 { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
4304 { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
4305 { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
4306 { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
4307 { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
4308 { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
4309 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
4310 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
4311 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
4312 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4313 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
4314 { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
4315 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
4316 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
4317 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
4318 { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
4319 { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
4320 { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
4321 { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
4322 { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
4323 { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
4324 { PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0 },
4325 { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
4326 { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
4327 { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
4328 { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
4329 { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
4330 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
4331 { PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0 },
4332 { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
4333 { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
4334 { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
4335 { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
4336 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
4337 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
4338 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
4339 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
4340 { PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0 },
4341 { PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0 },
4342 { PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0 },
4343 { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
4344 { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
4345 { PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0 },
4346 { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
4347 { PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0 },
4348 { PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0 },
4349 { PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0 },
4350 { PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0 },
4351 { PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0 },
4352 { PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0 },
4353 { PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0 },
4354 { PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0 },
4355 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
4356 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
4357 { PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0 },
4358 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
4359 { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
4360 { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
4361 { PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0 },
4362 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
4363 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4364 { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
4365 { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
4366 { PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0 },
4367 { PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0 },
4368 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
4369 { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
4370 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
4371 { PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0 },
4372 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
4373 { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
4374 { PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0 },
4375 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
4376 { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
4377 { PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0 },
4378 { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
4379 { PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0 },
4380 { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
4381 { PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0 },
4382 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
4383 { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
4384 { PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0 },
4385 { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
4386 { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
4387 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4388 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4389 { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
4390 { PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0 },
4391 { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
4392 { PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0 },
4393 { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
4394 { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
4395 { PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0 },
4396 { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
4397 { PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0 },
4398 { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
4399 { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
4400 { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
4401 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
4402 { PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0 },
4403 { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
4404 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
4405 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4406 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4407 { PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0 },
4408 { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
4409 { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
4410 { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
4411 { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
4412 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
4413 { PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0 },
4414 { PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0 },
4415 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4416 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4417 { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
4418 { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
4419 { PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0 },
4420 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
4421 { PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0 },
4422 { PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0 },
4423 { PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0 },
4424 { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
4425 { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
4426 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4427 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4428 { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
4429 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
4430 { PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0 },
4431 { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
4432 { PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0 },
4433 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
4434 { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
4435 { PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0 },
4436 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4437 { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
4438 { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
4439 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4440 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4441 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
4442 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
4443 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
4444 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
4445 { PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0 },
4446 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4447 { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
4448 { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
4449 { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
4450 { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
4451 { PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0 },
4452 { PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0 },
4453 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
4454 { PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0 },
4455 { PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0 },
4456 { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
4457 { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
4458 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
4459 { PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0 },
4460 { PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0 },
4461 { PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0 },
4462 { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
4463 { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
4464 { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
4465 { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
4466 { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
4467 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
4468 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
4469 { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
4470 { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
4471 { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
4472 { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
4473 { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
4474 { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
4475 { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
4476 { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
4477 { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
4478 { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
4479 { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
4480 { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
4481 { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
4482 { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
4483 { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
4484 { PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0 },
4485 { PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0 },
4486 { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
4487 { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
4488 { PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0 },
4489 { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
4490 { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
4491 { PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0 },
4492 { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
4493 { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
4494 { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
4495 { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
4496 { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
4497 { PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0 },
4498 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
4499 { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
4500 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
4501 { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
4502 { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
4503 { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
4504 { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
4505 { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
4506 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
4507 { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
4508 { PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0 },
4509 { PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0 },
4510 { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
4511 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
4512 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
4513 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
4514 { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
4515 { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
4516 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
4517 { PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0 },
4518 { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
4519 { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
4520 { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
4521 { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
4522 { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
4523 { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
4524 { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
4525 { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
4526 { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
4527 { PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0 },
4528 { PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0 },
4529 { PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0 },
4530 { PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0 },
4531 { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
4532 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
4533 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4534 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
4535 { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
4536 { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
4537 { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
4538 { PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0 },
4539 { PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0 },
4540 { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
4541 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
4542 { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
4543 { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
4544 { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
4545 { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
4546 { PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0 },
4547 { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
4548 { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
4549 { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
4550 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
4551 { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
4552 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
4553 { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
4554 { PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0 },
4555 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4556 { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
4557 { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
4558 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
4559 { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
4560 { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
4561 { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
4562 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4563 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
4564 { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
4565 { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
4566 { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
4567 { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
4568 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
4569 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
4570 { PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0 },
4571 { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
4572 { PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0 },
4573 { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
4574 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
4575 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
4576 { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
4577 { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
4578 { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
4579 { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
4580 { PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0 },
4581 { PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0 },
4582 { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
4583 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
4584 { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
4585 { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
4586 { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
4587 { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
4588 { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
4589 { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
4590 { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
4591 { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
4592 { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
4593 { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
4594 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
4595 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4596 { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
4597 { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
4598 { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
4599 { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
4600 { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
4601 { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
4602 { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
4603 { PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0 },
4604 { PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0 },
4605 { PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0 },
4606 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
4607 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
4608 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
4609 { PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0 },
4610 { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
4611 { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
4612 { PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0 },
4613 { PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0 },
4614 { PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0 },
4615 { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
4616 { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
4617 { PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0 },
4618 { PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0 },
4619 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
4620 { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
4621 { PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0 },
4622 { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
4623 { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
4624 { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
4625 { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
4626 { PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0 },
4627 { PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0 },
4628 { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
4629 { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
4630 { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
4631 { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
4632 { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
4633 { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
4634 { PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0 },
4635 { PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0 },
4636 { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
4637 { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
4638 { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
4639 { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
4640 { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
4641 { PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0 },
4642 { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
4643 { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
4644 { PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0 },
4645 { PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0 },
4646 { PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0 },
4647 { PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0 },
4648 { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
4649 { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
4650 { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
4651 { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
4652 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
4653 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
4654 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
4655 { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
4656 { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
4657 { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
4658 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4659 { PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0 },
4660 { PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0 },
4661 { PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0 },
4662 { PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0 },
4663 { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
4664 { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
4665 { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
4666 { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
4667 { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
4668 { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
4669 { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
4670 { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
4671 { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
4672 { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
4673 { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
4674 { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
4675 { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
4676 { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
4677 { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
4678 { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
4679 { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
4680 { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
4681 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4682 { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
4683 { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
4684 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
4685 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
4686 { PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0 },
4687 { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
4688 { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
4689 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4690 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4691 { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
4692 { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
4693 { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
4694 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4695 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4696 { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
4697 { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
4698 { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
4699 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4700 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4701 { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
4702 { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
4703 { PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0 },
4704 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4705 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4706 { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
4707 { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
4708 { PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0 },
4709 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4710 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4711 { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
4712 { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
4713 { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
4714 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4715 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4716 { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
4717 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
4718 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4719 { PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0 },
4720 { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
4721 { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
4722 { PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0 },
4723 { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
4724 { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
4725 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
4726 { PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0 },
4727 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
4728 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
4729 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
4730 { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
4731 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
4732 { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
4733 { PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0 },
4734 { PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0 },
4735 { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
4736 { PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0 },
4737 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
4738 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4739 { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
4740 { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
4741 { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
4742 { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
4743 { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
4744 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
4745 { PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0 },
4746 { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
4747 { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
4748 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4749 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4750 { PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0 },
4751 { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
4752 { PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0 },
4753 { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
4754 { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
4755 { PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0 },
4756 { PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0 },
4757 { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
4758 { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
4759 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
4760 { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
4761 { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
4762 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
4763 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
4764 { PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0 },
4765 { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
4766 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
4767 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4768 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4769 { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
4770 { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
4771 { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
4772 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
4773 { PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0 },
4774 { PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0 },
4775 { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
4776 { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
4777 { PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0 },
4778 { PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0 },
4779 { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
4780 { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
4781 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4782 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4783 { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
4784 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
4785 { PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0 },
4786 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
4787 { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
4788 { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
4789 { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
4790 { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
4791 { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
4792 { PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0 },
4793 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4794 { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
4795 { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
4796 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4797 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4798 { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
4799 { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
4800 { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
4801 { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
4802 { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
4803 { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
4804 { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
4805 { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
4806 { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
4807 { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
4808 { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
4809 { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
4810 { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
4811 { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
4812 { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
4813 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4814 { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
4815 { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
4816 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
4817 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
4818 { PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0 },
4819 { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
4820 { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
4821 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4822 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4823 { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
4824 { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
4825 { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
4826 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4827 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4828 { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
4829 { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
4830 { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
4831 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4832 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4833 { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
4834 { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
4835 { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
4836 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4837 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4838 { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
4839 { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
4840 { PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0 },
4841 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4842 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4843 { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
4844 { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
4845 { PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0 },
4846 { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
4847 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
4848 { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
4849 { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
4850 { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
4851 { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
4852 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4853 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
4854 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
4855 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
4856 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
4857 { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
4858 { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
4859 { PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0 },
4860 { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
4861 { PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0 },
4862 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
4863 { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
4864 { PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0 },
4865 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
4866 { PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0 },
4867 { PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0 },
4868 { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
4869 { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
4870 { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
4871 { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
4872 { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
4873 { PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0 },
4874 { PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0 },
4875 { PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0 },
4876 { PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0 },
4877 { PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0 },
4878 { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
4879 { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
4880 { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
4881 { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
4882 { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
4883 { PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0 },
4884 { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
4885 { PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0 },
4886 { PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0 },
4887 { PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0 },
4888 { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
4889 { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
4890 { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
4891 { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
4892 { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
4893 { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
4894 { PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0 },
4895 { PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0 },
4896 { PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0 },
4897 { PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0 },
4898 { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
4899 { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
4900 { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
4901 { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
4902 { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
4903 { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
4904 { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
4905 { PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0 },
4906 { PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0 },
4907 { PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0 },
4908 { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
4909 { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
4910 { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
4911 { PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0 },
4912 { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
4913 { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
4914 { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
4915 { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
4916 { PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0 },
4917 { PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0 },
4918 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
4919 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
4920 { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
4921 { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
4922 { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
4923 { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
4924 { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
4925 { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
4926 { PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0 },
4927 { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
4928 { PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0 },
4929 { PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0 },
4930 { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
4931 { PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0 },
4932 { PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0 },
4933 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
4934 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
4935 { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
4936 { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
4937 { PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0 },
4938 { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
4939 { PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0 },
4940 { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
4941 { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
4942 { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
4943 { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
4944 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4945 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4946 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
4947 { PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0 },
4948 { PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0 },
4949 { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
4950 { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
4951 { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
4952 { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
4953 { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
4954 { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
4955 { PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0 },
4956 { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
4957 { PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0 },
4958 { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
4959 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
4960 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
4961 { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
4962 { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
4963 { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
4964 { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
4965 { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
4966 { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
4967 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
4968 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
4969 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
4970 { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
4971 { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
4972 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
4973 { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
4974 { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
4975 { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
4976 { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
4977 { PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0 },
4978 { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
4979 { PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0 },
4980 { PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0 },
4981 { PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0 },
4982 { PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0 },
4983 { PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0 },
4984 { PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0 },
4985 { PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0 },
4986 { PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0 },
4987 { PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0 },
4988 { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
4989 { PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0 },
4990 { PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0 },
4991 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
4992 { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
4993 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
4994 { PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0 },
4995 { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
4996 { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
4997 { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
4998 { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
4999 { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
5000 { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
5001 { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
5002 { PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0 },
5003 { PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0 },
5004 { PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0 },
5005 { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
5006 { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
5007 { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
5008 { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
5009 { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
5010 { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
5011 { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
5012 { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
5013 { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
5014 { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
5015 { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
5016 { PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0 },
5017 { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
5018 { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
5019 { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
5020 { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
5021 { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
5022 { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
5023 { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
5024 { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
5025 { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
5026 { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
5027 { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
5028 { PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0 },
5029 { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
5030 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
5031 { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
5032 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
5033 { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
5034 { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
5035 { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
5036 { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
5037 { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
5038 { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
5039 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
5040 { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
5041 { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
5042 { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
5043 { PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0 },
5044 { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
5045 { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
5046 { PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0 },
5047 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
5048 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
5049 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
5050 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
5051 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
5052 { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
5053 { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
5054 { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
5055 { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
5056 { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
5057 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
5058 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
5059 { PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0 },
5060 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
5061 { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
5062 { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
5063 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
5064 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
5065 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
5066 { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
5067 { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
5068 { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
5069 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
5070 { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
5071 { PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0 },
5072 { PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0 },
5073 { PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0 },
5074 { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
5075 { PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0 },
5076 { PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0 },
5077 { PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0 },
5078 { PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0 },
5079 { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
5080 { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
5081 { PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0 },
5082 { PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0 },
5083 { PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0 },
5084 { PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0 },
5085 { PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0 },
5086 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
5087 { PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0 },
5088 { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
5089 { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
5090 { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
5091 { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
5092 { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
5093 { PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0 },
5094 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
5095 { PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0 },
5096 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
5097 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
5098 { PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0 },
5099 { PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0 },
5100 { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
5101 { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
5102 { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
5103 { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
5104 { PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0 },
5105 { PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0 },
5106 { PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0 },
5107 { PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0 },
5108 { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
5109 { PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0 },
5110 { PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0 },
5111 { PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0 },
5112 { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
5113 { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
5114 { PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0 },
5115 { PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0 },
5116 { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
5117 { PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0 },
5118 { PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0 },
5119 { PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0 },
5120 { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
5121 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
5122 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
5123 { PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0 },
5124 { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
5125 { PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0 },
5126 { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
5127 { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
5128 { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
5129 { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
5130 { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
5131 { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
5132 { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
5133 { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
5134 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
5135 { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
5136 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5137 { PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0 },
5138 { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
5139 { PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0 },
5140 { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
5141 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
5142 { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
5143 { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
5144 { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
5145 { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
5146 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
5147 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
5148 { PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0 },
5149 { PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0 },
5150 { PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0 },
5151 { PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0 },
5152 { PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0 },
5153 { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
5154 { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
5155 { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
5156 { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
5157 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
5158 { PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0 },
5159 { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
5160 { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
5161 { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
5162 { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
5163 { PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0 },
5164 { PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0 },
5165 { PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0 },
5166 { PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0 },
5167 { PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0 },
5168 { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
5169 { PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0 },
5170 { PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0 },
5171 { PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0 },
5172 { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
5173 { PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0 },
5174 { PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0 },
5175 { PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0 },
5176 { PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0 },
5177 { PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0 },
5178 { PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0 },
5179 { PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0 },
5180 { PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0 },
5181 { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
5182 { PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0 },
5183 { PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0 },
5184 { PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0 },
5185 { PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0 },
5186 { PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0 },
5187 { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
5188 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5189 { PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0 },
5190 { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
5191 { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
5192 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
5193 { PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0 },
5194 { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
5195 { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
5196 { PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0 },
5197 { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
5198 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5199 { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
5200 { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
5201 { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
5202 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
5203 { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
5204 { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
5205 { PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0 },
5206 { PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0 },
5207 { PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0 },
5208 { PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0 },
5209 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
5210 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
5211 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
5212 { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
5213 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
5214 { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
5215 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
5216 { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
5217 { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
5218 { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
5219 { PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0 },
5220 { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
5221 { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
5222 { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
5223 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
5224 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
5225 { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
5226 { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
5227 { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
5228 { PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0 },
5229 { PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0 },
5230 { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
5231 { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
5232 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
5233 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
5234 { PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0 },
5235 { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
5236 { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
5237 { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
5238 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
5239 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
5240 { PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0 },
5241 { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
5242 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
5243 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
5244 { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
5245 { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
5246 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
5247 { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
5248 { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
5249 { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
5250 { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
5251 { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
5252 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
5253 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
5254 { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
5255 { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
5256 { PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0 },
5257 { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
5258 { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
5259 { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
5260 { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
5261 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
5262 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
5263 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
5264 { PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0 },
5265 { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
5266 { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
5267 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
5268 { PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0 },
5269 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
5270 { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
5271 { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
5272 { PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0 },
5273 { PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0 },
5274 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
5275 { PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0 },
5276 { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
5277 { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
5278 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
5279 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
5280 { PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0 },
5281 { PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0 },
5282 { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
5283 { PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0 },
5284 { PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0 },
5285 { PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0 },
5286 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
5287 { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
5288 { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
5289 { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
5290 { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
5291 { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
5292 { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
5293 { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
5294 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
5295 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
5296 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
5297 { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
5298 { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
5299 { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
5300 { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
5301 { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
5302 { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
5303 { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
5304 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
5305 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
5306 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
5307 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
5308 { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
5309 { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
5310 { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
5311 { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
5312 { PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0 },
5313 { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
5314 { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
5315 { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
5316 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5317 { PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0 },
5318 { PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0 },
5319 { PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0 },
5320 { PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0 },
5321 { PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0 },
5322 { PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0 },
5323 { PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0 },
5324 { PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0 },
5325 { PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0 },
5326 { PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0 },
5327 { PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0 },
5328 { PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0 },
5329 { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
5330 { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
5331 { PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0 },
5332 { PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0 },
5333 { PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0 },
5334 { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
5335 { PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0 },
5336 { PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0 },
5337 { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
5338 { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
5339 { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
5340 { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
5341 { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
5342 { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
5343 { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
5344 { PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0 },
5345 { PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0 },
5346 { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
5347 { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
5348 { PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0 },
5349 { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
5350 { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
5351 { PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0 },
5352 { PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0 },
5353 { PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0 },
5354 { PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0 },
5355 { PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0 },
5356 { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
5357 { PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0 },
5358 { PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0 },
5359 { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
5360 { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
5361 { PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0 },
5362 { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
5363 { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
5364 { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
5365 { PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0 },
5366 { PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0 },
5367 { PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0 },
5368 { PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0 },
5369 { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
5370 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
5371 { PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0 },
5372 { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
5373 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5374 { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
5375 { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
5376 { PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0 },
5377 { PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0 },
5378 { PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0 },
5379 { PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0 },
5380 { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
5381 { PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0 },
5382 { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
5383 { PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0 },
5384 { PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0 },
5385 { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
5386 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5387 { PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0 },
5388 { PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0 },
5389 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
5390 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
5391 { PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0 },
5392 { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
5393 { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
5394 { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
5395 { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
5396 { PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0 },
5397 { PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0 },
5398 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
5399 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5400 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
5401 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
5402 { PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0 },
5403 { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
5404 { PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0 },
5405 { PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0 },
5406 { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
5407 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
5408 { PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0 },
5409 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
5410 { PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0 },
5411 { PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0 },
5412 { PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0 },
5413 { PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0 },
5414 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
5415 { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
5416 { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
5417 { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
5418 { PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0 },
5419 { PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0 },
5420 { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
5421 { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
5422 { PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0 },
5423 { PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0 },
5424 { PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0 },
5425 { PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0 },
5426 { PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0 },
5427 { PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0 },
5428 { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
5429 { PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0 },
5430 { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
5431 { PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0 },
5432 { PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0 },
5433 { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
5434 { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
5435 { PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0 },
5436 { PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0 },
5437 { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
5438 { PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0 },
5439 { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
5440 { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
5441 { PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0 },
5442 { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
5443 { PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0 },
5444 { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
5445 { PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0 },
5446 { PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0 },
5447 { PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0 },
5448 { PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0 },
5449 { PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0 },
5450 { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
5451 { PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0 },
5452 { PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0 },
5453 { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
5454 { PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0 },
5455 { PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0 },
5456 { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
5457 { PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0 },
5458 { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
5459 { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
5460 { PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0 },
5461 { PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0 },
5462 { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
5463 { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
5464 { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
5465 { PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0 },
5466 { PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0 },
5467 { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
5468 { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
5469 { PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0 },
5470 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
5471 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
5472 { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
5473 { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
5474 { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
5475 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
5476 { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
5477 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
5478 { PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0 },
5479 { PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0 },
5480 { PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0 },
5481 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
5482 { PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0 },
5483 { PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0 },
5484 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
5485 { PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0 },
5486 { PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0 },
5487 { PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0 },
5488 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
5489 { PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0 },
5490 { PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0 },
5491 { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
5492 { PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0 },
5493 { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
5494 { PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0 },
5495 { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
5496 { PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0 },
5497 { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
5498 { PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0 },
5499 { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
5500 { PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0 },
5501 { PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0 },
5502 { PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0 },
5503 { PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0 },
5504 { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
5505 { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
5506 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5507 { PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0 },
5508 { PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0 },
5509 { PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0 },
5510 { PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0 },
5511 { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
5512 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5513 { PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0 },
5514 { PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0 },
5515 { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
5516 { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
5517 { PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0 },
5518 { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
5519 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5520 { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
5521 { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
5522 { PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0 },
5523 { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
5524 { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
5525 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5526 { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
5527 { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
5528 { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
5529 { PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0 },
5530 { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
5531 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
5532 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
5533 { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
5534 { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
5535 { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
5536 { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
5537 { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
5538 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5539 { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
5540 { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
5541 { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
5542 { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
5543 { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
5544 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5545 { PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0 },
5546 { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
5547 { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
5548 { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
5549 { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
5550 { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
5551 { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
5552 { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
5553 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5554 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
5555 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
5556 { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
5557 { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
5558 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5559 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
5560 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
5561 { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
5562 { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
5563 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
5564 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5565 { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
5566 { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
5567 { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
5568 { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
5569 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5570 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5571 { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
5572 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5573 { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
5574 { PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0 },
5575 { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
5576 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5577 { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
5578 { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
5579 { PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0 },
5580 { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
5581 { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
5582 { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
5583 { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
5584 { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
5585 { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
5586 { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
5587 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5588 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
5589 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
5590 { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
5591 { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
5592 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
5593 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5594 { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
5595 { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
5596 { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
5597 { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
5598 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5599 { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
5600 { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
5601 { PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0 },
5602 { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
5603 { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
5604 { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
5605 { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
5606 { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
5607 { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
5608 { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
5609 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
5610 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5611 { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
5612 { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
5613 { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
5614 { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
5615 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5616 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
5617 { PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0 },
5618 { PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0 },
5619 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
5620 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
5621 { PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0 },
5622 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
5623 { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
5624 { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
5625 { PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0 },
5626 { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
5627 { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
5628 { PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0 },
5629 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
5630 { PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0 },
5631 { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
5632 { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
5633 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
5634 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
5635 { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
5636 { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
5637 { PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0 },
5638 { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
5639 { PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0 },
5640 { PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0 },
5641 { PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0 },
5642 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
5643 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
5644 { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
5645 { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
5646 { PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0 },
5647 { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
5648 { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
5649 { PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0 },
5650 { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
5651 { PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0 },
5652 { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
5653 { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
5654 { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
5655 { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
5656 { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
5657 { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
5658 { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
5659 { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
5660 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
5661 { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
5662 { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
5663 { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
5664 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5665 { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
5666 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
5667 { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
5668 { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
5669 { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
5670 { PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0 },
5671 { PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0 },
5672 { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
5673 { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
5674 { PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0 },
5675 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
5676 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
5677 { PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0 },
5678 { PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0 },
5679 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
5680 { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
5681 { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
5682 { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
5683 { PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0 },
5684 { PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0 },
5685 { PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0 },
5686 { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
5687 { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
5688 { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
5689 { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
5690 { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
5691 { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
5692 { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
5693 { PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0 },
5694 { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
5695 { PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0 },
5696 { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
5697 { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
5698 { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
5699 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
5700 { PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0 },
5701 { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
5702 { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
5703 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
5704 { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
5705 { PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0 },
5706 { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
5707 { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
5708 { PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0 },
5709 { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
5710 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
5711 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
5712 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
5713 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
5714 { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
5715 { PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0 },
5716 { PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0 },
5717 { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
5718 { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
5719 { PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0 },
5720 { PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0 },
5721 { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
5722 { PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0 },
5723 { PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0 },
5724 { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
5725 { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
5726 { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
5727 { PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0 },
5728 { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
5729 { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
5730 { PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0 },
5731 { PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0 },
5732 { PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0 },
5733 { PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0 },
5734 { PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0 },
5735 { PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0 },
5736 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
5737 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
5738 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
5739 { PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0 },
5740 { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
5741 { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
5742 { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
5743 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
5744 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
5745 { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
5746 { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
5747 { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
5748 { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
5749 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
5750 { PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0 },
5751 { PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0 },
5752 { PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0 },
5753 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5754 { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
5755 { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
5756 { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
5757 { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
5758 { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
5759 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
5760 { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
5761 { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
5762 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
5763 { PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0 },
5764 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
5765 { PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0 },
5766 { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
5767 { PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0 },
5768 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
5769 { PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0 },
5770 { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
5771 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
5772 { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
5773 { PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0 },
5774 { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
5775 { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
5776 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
5777 { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
5778 { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
5779 { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
5780 { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
5781 { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
5782 { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
5783 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
5784 { PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0 },
5785 { PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0 },
5786 { PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0 },
5787 { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
5788 { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
5789 { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
5790 { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
5791 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
5792 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
5793 { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
5794 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
5795 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
5796 { PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0 },
5797 { PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0 },
5798 { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
5799 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
5800 { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
5801 { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
5802 { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
5803 { PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0 },
5804 { PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0 },
5805 { PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0 },
5806 { PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0 },
5807 { PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0 },
5808 { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
5809 { PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0 },
5810 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
5811 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
5812 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
5813 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
5814 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
5815 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
5816 { PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0 },
5817 { PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0 },
5818 { PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0 },
5819 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
5820 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
5821 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5822 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
5823 { PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0 },
5824 { PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0 },
5825 { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
5826 { PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0 },
5827 { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
5828 { PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0 },
5829 { PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0 },
5830 { PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0 },
5831 { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
5832 { PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0 },
5833 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
5834 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
5835 { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
5836 { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
5837 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5838 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
5839 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
5840 { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
5841 { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
5842 { PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0 },
5843 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
5844 { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
5845 { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
5846 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5847 { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
5848 { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
5849 { PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0 },
5850 { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
5851 { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
5852 { PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0 },
5853 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
5854 { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
5855 { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
5856 { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
5857 { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
5858 { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
5859 { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
5860 { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
5861 { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
5862 { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
5863 { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
5864 { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
5865 { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
5866 { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
5867 { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
5868 { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
5869 { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
5870 { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
5871 { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
5872 { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
5873 { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
5874 { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
5875 { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
5876 { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
5877 { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
5878 { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
5879 { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
5880 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
5881 { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
5882 { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
5883 { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
5884 { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
5885 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
5886 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
5887 { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
5888 { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
5889 { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
5890 { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
5891 { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
5892 { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
5893 { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
5894 { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
5895 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
5896 { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
5897 { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
5898 { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
5899 { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
5900 { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
5901 { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
5902 { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
5903 { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
5904 { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
5905 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
5906 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
5907 { PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0 },
5908 { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
5909 { PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0 },
5910 { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
5911 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
5912 { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
5913 { PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0 },
5914 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
5915 { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
5916 { PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0 },
5917 { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
5918 { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
5919 { PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0 },
5920 { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
5921 { PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0 },
5922 { PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0 },
5923 { PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0 },
5924 { PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0 },
5925 { PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0 },
5926 { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
5927 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5928 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
5929 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
5930 { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
5931 { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
5932 { PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0 },
5933 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
5934 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
5935 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
5936 { PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0 },
5937 { PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0 },
5938 { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
5939 { PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0 },
5940 { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
5941 { PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0 },
5942 { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
5943 { PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0 },
5944 { PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0 },
5945 { PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0 },
5946 { PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0 },
5947 { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
5948 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
5949 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
5950 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
5951 { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
5952 { PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0 },
5953 { PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0 },
5954 { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
5955 { PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0 },
5956 { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
5957 { PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0 },
5958 { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
5959 { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
5960 { PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0 },
5961 { PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0 },
5962 { PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0 },
5963 { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
5964 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
5965 { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
5966 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
5967 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
5968 { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
5969 { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
5970 { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
5971 { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
5972 { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
5973 { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
5974 { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
5975 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
5976 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
5977 { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
5978 { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
5979 { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
5980 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
5981 { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
5982 { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
5983 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
5984 { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
5985 { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
5986 { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
5987 { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
5988 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
5989 { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
5990 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
5991 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
5992 { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
5993 { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
5994 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
5995 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
5996 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
5997 { PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0 },
5998 { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
5999 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
6000 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
6001 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
6002 { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
6003 { PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0 },
6004 { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
6005 { PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0 },
6006 { PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0 },
6007 { PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0 },
6008 { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
6009 { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
6010 { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
6011 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
6012 { PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0 },
6013 { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
6014 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
6015 { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
6016 { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
6017 { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
6018 { PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0 },
6019 { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
6020 { PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0 },
6021 { PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0 },
6022 { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
6023 { PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0 },
6024 { PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0 },
6025 { PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0 },
6026 { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
6027 { PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0 },
6028 { PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0 },
6029 { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
6030 { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
6031 { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
6032 { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
6033 { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
6034 { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
6035 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
6036 { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
6037 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
6038 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
6039 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
6040 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
6041 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
6042 { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
6043 { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
6044 { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
6045 { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
6046 { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
6047 { PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0 },
6048 { PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0 },
6049 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
6050 { PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0 },
6051 { PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0 },
6052 { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
6053 { PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0 },
6054 { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
6055 { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
6056 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
6057 { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
6058 { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
6059 { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
6060 { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
6061 { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
6062 { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
6063 { PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0 },
6064 { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
6065 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
6066 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
6067 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
6068 { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
6069 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
6070 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
6071 { PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0 },
6072 { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
6073 { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
6074 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6075 { PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0 },
6076 { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
6077 { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
6078 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6079 { PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0 },
6080 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
6081 { PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0 },
6082 { PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0 },
6083 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
6084 { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
6085 { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
6086 { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
6087 { PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0 },
6088 { PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0 },
6089 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
6090 { PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0 },
6091 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
6092 { PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0 },
6093 { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
6094 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
6095 { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
6096 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
6097 { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
6098 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
6099 { PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0 },
6100 { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
6101 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
6102 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
6103 { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
6104 { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
6105 { PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0 },
6106 { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
6107 { PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0 },
6108 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
6109 { PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0 },
6110 { PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0 },
6111 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6112 { PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0 },
6113 { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
6114 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
6115 { PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0 },
6116 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
6117 { PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0 },
6118 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
6119 { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
6120 { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
6121 { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
6122 { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
6123 { PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0 },
6124 { PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0 },
6125 { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
6126 { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
6127 { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
6128 { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
6129 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
6130 { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
6131 { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
6132 { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
6133 { PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0 },
6134 { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
6135 { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
6136 { PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0 },
6137 { PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0 },
6138 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
6139 { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
6140 { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
6141 { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
6142 { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
6143 { PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0 },
6144 { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
6145 { PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0 },
6146 { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
6147 { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
6148 { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
6149 { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
6150 { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
6151 { PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0 },
6152 { PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0 },
6153 { PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0 },
6154 { PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0 },
6155 { PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0 },
6156 { PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0 },
6157 { PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0 },
6158 { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
6159 { PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0 },
6160 { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
6161 { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
6162 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
6163 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
6164 { PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0 },
6165 { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
6166 { PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0 },
6167 { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
6168 { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
6169 { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
6170 { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
6171 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
6172 { PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0 },
6173 { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
6174 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
6175 { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
6176 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
6177 { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
6178 { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
6179 { PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0 },
6180 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
6181 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
6182 { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
6183 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
6184 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
6185 { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
6186 { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
6187 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
6188 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
6189 { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
6190 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
6191 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
6192 { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
6193 { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
6194 { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
6195 { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
6196 { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
6197 { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
6198 { PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0 },
6199 { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
6200 { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
6201 { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
6202 { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
6203 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
6204 { PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0 },
6205 { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
6206 { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
6207 { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
6208 { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
6209 { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
6210 { PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0 },
6211 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
6212 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6213 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
6214 { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
6215 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
6216 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
6217 { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
6218 { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
6219 { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
6220 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
6221 { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
6222 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
6223 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
6224 { PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0 },
6225 { PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0 },
6226 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
6227 { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
6228 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
6229 { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
6230 { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
6231 { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
6232 { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
6233 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
6234 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
6235 { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
6236 { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
6237 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
6238 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
6239 { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
6240 { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
6241 { PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0 },
6242 { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
6243 { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
6244 { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
6245 { PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0 },
6246 { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
6247 { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
6248 { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
6249 { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
6250 { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
6251 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
6252 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
6253 { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
6254 { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
6255 { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
6256 { PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0 },
6257 { PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0 },
6258 { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
6259 { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
6260 { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
6261 { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
6262 { PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0 },
6263 { PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0 },
6264 { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
6265 { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
6266 { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
6267 { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
6268 { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
6269 { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
6270 { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
6271 { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
6272 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
6273 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
6274 { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
6275 { PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0 },
6276 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6277 { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
6278 { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
6279 { PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0 },
6280 { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
6281 { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
6282 { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
6283 { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
6284 { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
6285 { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
6286 { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
6287 { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
6288 { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
6289 { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
6290 { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
6291 { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
6292 { PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0 },
6293 { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
6294 { PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0 },
6295 { PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0 },
6296 { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
6297 { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
6298 { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
6299 { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
6300 { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
6301 { PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0 },
6302 { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
6303 { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
6304 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
6305 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
6306 { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
6307 { PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0 },
6308 { PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0 },
6309 { PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0 },
6310 { PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0 },
6311 { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
6312 { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
6313 { PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0 },
6314 { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
6315 { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
6316 { PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0 },
6317 { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
6318 { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
6319 { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
6320 { PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0 },
6321 { PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0 },
6322 { PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0 },
6323 { PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0 },
6324 { PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0 },
6325 { PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0 },
6326 { PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0 },
6327 { PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0 },
6328 { PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0 },
6329 { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
6330 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
6331 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6332 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
6333 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
6334 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
6335 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
6336 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
6337 { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
6338 { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
6339 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
6340 { PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0 },
6341 { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
6342 { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
6343 { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
6344 { PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0 },
6345 { PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0 },
6346 { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
6347 { PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0 },
6348 { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
6349 { PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0 },
6350 { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
6351 { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
6352 { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
6353 { PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0 },
6354 { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
6355 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
6356 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
6357 { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
6358 { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
6359 { PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0 },
6360 { PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0 },
6361 { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
6362 { PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0 },
6363 { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
6364 { PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0 },
6365 { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
6366 { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
6367 { PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0 },
6368 { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
6369 { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
6370 { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
6371 { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
6372 { PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0 },
6373 { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
6374 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
6375 { PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0 },
6376 { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
6377 { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
6378 { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
6379 { PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0 },
6380 { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
6381 { PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0 },
6382 { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
6383 { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
6384 { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
6385 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
6386 { PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0 },
6387 { PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0 },
6388 { PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0 },
6389 { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
6390 { PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0 },
6391 { PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0 },
6392 { PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0 },
6393 { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
6394 { PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0 },
6395 { PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0 },
6396 { PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0 },
6397 { PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0 },
6398 { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
6399 { PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0 },
6400 { PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0 },
6401 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
6402 { PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0 },
6403 { PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0 },
6404 { PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0 },
6405 { PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0 },
6406 { PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0 },
6407 { PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0 },
6408 { PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0 },
6409 { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
6410 { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
6411 { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
6412 { PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0 },
6413 { PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0 },
6414 { PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0 },
6415 { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
6416 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
6417 { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
6418 { PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0 },
6419 { PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0 },
6420 { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
6421 { PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0 },
6422 { PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0 },
6423 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
6424 { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
6425 { PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0 },
6426 { PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0 },
6427 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
6428 { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
6429 { PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0 },
6430 { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
6431 { PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0 },
6432 { PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0 },
6433 { PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0 },
6434 { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
6435 { PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0 },
6436 { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
6437 { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
6438 { PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0 },
6439 { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
6440 { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
6441 { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
6442 { PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0 },
6443 { PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0 },
6444 { PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0 },
6445 { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
6446 { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
6447 { PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0 },
6448 { PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0 },
6449 { PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0 },
6450 { PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0 },
6451 { PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0 },
6452 { PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0 },
6453 { PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0 },
6454 { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
6455 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6456 { PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0 },
6457 { PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0 },
6458 { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
6459 { PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0 },
6460 { PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0 },
6461 { PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0 },
6462 { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
6463 { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
6464 { PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0 },
6465 { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
6466 { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
6467 { PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0 },
6468 { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
6469 { PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0 },
6470 { PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0 },
6471 { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
6472 { PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0 },
6473 { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
6474 { PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0 },
6475 { PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0 },
6476 { PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0 },
6477 { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
6478 { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
6479 { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
6480 { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
6481 { PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0 },
6482 { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
6483 { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
6484 { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
6485 { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
6486 { PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0 },
6487 { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
6488 { PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0 },
6489 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6490 { PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0 },
6491 { PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0 },
6492 { PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0 },
6493 { PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0 },
6494 { PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0 },
6495 { PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0 },
6496 { PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0 },
6497 { PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0 },
6498 { PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0 },
6499 { PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0 },
6500 { PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0 },
6501 { PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0 },
6502 { PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0 },
6503 { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
6504 { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
6505 { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
6506 { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
6507 { PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0 },
6508 { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
6509 { PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0 },
6510 { PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0 },
6511 { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
6512 { PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0 },
6513 { PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0 },
6514 { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
6515 { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
6516 { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
6517 { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
6518 { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
6519 { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
6520 { PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0 },
6521 { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
6522 { PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0 },
6523 { PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0 },
6524 { PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0 },
6525 { PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0 },
6526 { PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0 },
6527 { PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0 },
6528 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
6529 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
6530 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6531 { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
6532 { PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0 },
6533 { PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0 },
6534 { PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0 },
6535 { PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0 },
6536 { PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0 },
6537 { PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0 },
6538 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
6539 { PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0 },
6540 { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
6541 { PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0 },
6542 { PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0 },
6543 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
6544 { PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0 },
6545 { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
6546 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
6547 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
6548 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
6549 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
6550 { PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0 },
6551 { PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0 },
6552 { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
6553 { PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0 },
6554 { PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0 },
6555 { PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0 },
6556 { PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0 },
6557 { PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0 },
6558 { PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0 },
6559 { PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0 },
6560 { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
6561 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
6562 { PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0 },
6563 { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
6564 { PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0 },
6565 { PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0 },
6566 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
6567 { PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0 },
6568 { PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0 },
6569 { PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0 },
6570 { PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0 },
6571 { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
6572 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
6573 { PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0 },
6574 { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
6575 { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
6576 { PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0 },
6577 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
6578 { PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0 },
6579 { PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0 },
6580 { PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0 },
6581 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
6582 { PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0 },
6583 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6584 { PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0 },
6585 { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
6586 { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
6587 { PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0 },
6588 { PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0 },
6589 { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
6590 { PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0 },
6591 { PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0 },
6592 { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
6593 { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
6594 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
6595 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6596 { PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0 },
6597 { PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0 },
6598 { PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0 },
6599 { PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0 },
6600 { PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0 },
6601 { PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0 },
6602 { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
6603 { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
6604 { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
6605 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
6606 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6607 { PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0 },
6608 { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
6609 { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
6610 { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
6611 { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
6612 { PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0 },
6613 { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
6614 { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
6615 { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
6616 { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
6617 { PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0 },
6618 { PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0 },
6619 { PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0 },
6620 { PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0 },
6621 { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
6622 { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
6623 { PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0 },
6624 { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
6625 { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
6626 { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
6627 { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
6628 { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
6629 { PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0 },
6630 { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
6631 { PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0 },
6632 { PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0 },
6633 { PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0 },
6634 { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
6635 { PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0 },
6636 { PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0 },
6637 { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
6638 { PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0 },
6639 { PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0 },
6640 { PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0 },
6641 { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
6642 { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
6643 { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
6644 { PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0 },
6645 { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
6646 { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
6647 { PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0 },
6648 { PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0 },
6649 { PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0 },
6650 { PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0 },
6651 { PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0 },
6652 { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
6653 { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
6654 { PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0 },
6655 { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
6656 { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
6657 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
6658 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6659 { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
6660 { PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0 },
6661 { PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0 },
6662 { PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0 },
6663 { PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0 },
6664 { PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0 },
6665 { PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0 },
6666 { PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0 },
6667 { PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0 },
6668 { PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0 },
6669 { PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0 },
6670 { PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0 },
6671 { PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0 },
6672 { PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0 },
6673 { PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0 },
6674 { PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0 },
6675 { PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0 },
6676 { PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0 },
6677 { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
6678 { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
6679 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6680 { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
6681 { PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0 },
6682 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
6683 { PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0 },
6684 { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
6685 { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
6686 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
6687 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
6688 { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
6689 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
6690 { PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0 },
6691 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
6692 { PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0 },
6693 { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
6694 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
6695 { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
6696 { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
6697 { PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0 },
6698 { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
6699 { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
6700 { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
6701 { PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0 },
6702 { PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0 },
6703 { PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0 },
6704 { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
6705 { PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0 },
6706 { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
6707 { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
6708 { PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0 },
6709 { PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0 },
6710 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
6711 { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
6712 { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
6713 { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
6714 { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
6715 { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
6716 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
6717 { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
6718 { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
6719 { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
6720 { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
6721 { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
6722 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
6723 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
6724 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
6725 { PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0 },
6726 { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
6727 { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
6728 { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
6729 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
6730 { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
6731 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
6732 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
6733 { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
6734 { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
6735 { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
6736 { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
6737 { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
6738 { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
6739 { PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0 },
6740 { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
6741 { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
6742 { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
6743 { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
6744 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
6745 { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
6746 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
6747 { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
6748 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
6749 { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
6750 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6751 { PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0 },
6752 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
6753 { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
6754 { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
6755 { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
6756 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
6757 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
6758 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6759 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
6760 { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
6761 { PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0 },
6762 { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
6763 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
6764 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
6765 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6766 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
6767 { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
6768 { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
6769 { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
6770 { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
6771 { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
6772 { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
6773 { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
6774 { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
6775 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
6776 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
6777 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
6778 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
6779 { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
6780 { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
6781 { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
6782 { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
6783 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
6784 { PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0 },
6785 { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
6786 { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
6787 { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
6788 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
6789 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
6790 { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
6791 { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
6792 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
6793 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
6794 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6795 { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
6796 { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
6797 { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
6798 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
6799 { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
6800 { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
6801 { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
6802 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
6803 { PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0 },
6804 { PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0 },
6805 { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
6806 { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
6807 { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
6808 { PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0 },
6809 { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
6810 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
6811 { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
6812 { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
6813 { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
6814 { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
6815 { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
6816 { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
6817 { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
6818 { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
6819 { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
6820 { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
6821 { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
6822 { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
6823 { PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0 },
6824 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
6825 { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
6826 { PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0 },
6827 { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
6828 { PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0 },
6829 { PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0 },
6830 { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
6831 { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
6832 { PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0 },
6833 { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
6834 { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
6835 { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
6836 { PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0 },
6837 { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
6838 { PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0 },
6839 { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
6840 { PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0 },
6841 { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
6842 { PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0 },
6843 { PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0 },
6844 { PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0 },
6845 { PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0 },
6846 { PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0 },
6847 { PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0 },
6848 { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
6849 { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
6850 { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
6851 { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
6852 { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
6853 { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
6854 { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
6855 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
6856 { PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0 },
6857 { PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0 },
6858 { PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0 },
6859 { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
6860 { PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0 },
6861 { PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0 },
6862 { PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0 },
6863 { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
6864 { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
6865 { PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0 },
6866 { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
6867 { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
6868 { PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0 },
6869 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
6870 { PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0 },
6871 { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
6872 { PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0 },
6873 { PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0 },
6874 { PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0 },
6875 { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
6876 { PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0 },
6877 { PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0 },
6878 { PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0 },
6879 { PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0 },
6880 { PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0 },
6881 { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
6882 { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
6883 { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
6884 { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
6885 { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
6886 { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
6887 { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
6888 { PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0 },
6889 { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
6890 { PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0 },
6891 { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
6892 { PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0 },
6893 { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
6894 { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
6895 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
6896 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
6897 { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
6898 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
6899 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
6900 { PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0 },
6901 { PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0 },
6902 { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
6903 { PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0 },
6904 { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
6905 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
6906 { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
6907 { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
6908 { PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0 },
6909 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
6910 { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
6911 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
6912 { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
6913 { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
6914 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
6915 { PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0 },
6916 { PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0 },
6917 { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
6918 { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
6919 { PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0 },
6920 { PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0 },
6921 { PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0 },
6922 { PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0 },
6923 { PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0 },
6924 { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
6925 { PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0 },
6926 { PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0 },
6927 { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
6928 { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
6929 { PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0 },
6930 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
6931 { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
6932 { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
6933 { PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0 },
6934 { PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0 },
6935 { PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0 },
6936 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
6937 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
6938 { PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0 },
6939 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
6940 { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
6941 { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
6942 { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
6943 { PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0 },
6944 { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
6945 { PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0 },
6946 { PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0 },
6947 { PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0 },
6948 { PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0 },
6949 { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
6950 { PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0 },
6951 { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
6952 { PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0 },
6953 { PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0 },
6954 { PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0 },
6955 { PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0 },
6956 { PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0 },
6957 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
6958 { PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0 },
6959 { PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0 },
6960 { PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0 },
6961 { PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0 },
6962 { PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0 },
6963 { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
6964 { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
6965 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
6966 { PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0 },
6967 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
6968 { PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0 },
6969 { PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0 },
6970 { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
6971 { PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0 },
6972 { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
6973 { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
6974 { PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0 },
6975 { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
6976 { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
6977 { PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0 },
6978 { PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0 },
6979 { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
6980 { PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0 },
6981 { PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0 },
6982 { PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0 },
6983 { PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0 },
6984 { PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0 },
6985 { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
6986 { PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0 },
6987 { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
6988 { PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0 },
6989 { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
6990 { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
6991 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
6992 { PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0 },
6993 { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
6994 { PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0 },
6995 { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
6996 { PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0 },
6997 { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
6998 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
6999 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7000 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7001 { PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0 },
7002 { PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0 },
7003 { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
7004 { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
7005 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7006 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7007 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7008 { PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0 },
7009 { PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0 },
7010 { PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0 },
7011 { PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0 },
7012 { PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0 },
7013 { PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0 },
7014 { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
7015 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
7016 { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
7017 { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
7018 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
7019 { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
7020 { PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0 },
7021 { PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0 },
7022 { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
7023 { PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0 },
7024 { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
7025 { PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0 },
7026 { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
7027 { PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0 },
7028 { PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0 },
7029 { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
7030 { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
7031 { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
7032 { PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0 },
7033 { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
7034 { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
7035 { PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0 },
7036 { PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0 },
7037 { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
7038 { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
7039 { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
7040 { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
7041 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
7042 { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
7043 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
7044 { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
7045 { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
7046 { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
7047 { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
7048 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
7049 { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
7050 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
7051 { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
7052 { PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0 },
7053 { PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0 },
7054 { PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0 },
7055 { PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0 },
7056 { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
7057 { PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0 },
7058 { PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0 },
7059 { PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0 },
7060 { PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0 },
7061 { PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0 },
7062 { PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0 },
7063 { PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0 },
7064 { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
7065 { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
7066 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
7067 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
7068 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
7069 { PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0 },
7070 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
7071 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
7072 { PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0 },
7073 { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
7074 { PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0 },
7075 { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
7076 { PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0 },
7077 { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
7078 { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
7079 { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
7080 { PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0 },
7081 { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
7082 { PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0 },
7083 { PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0 },
7084 { PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0 },
7085 { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
7086 { PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0 },
7087 { PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0 },
7088 { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
7089 { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
7090 { PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0 },
7091 { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
7092 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
7093 { PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0 },
7094 { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
7095 { PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0 },
7096 { PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0 },
7097 { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
7098 { PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0 },
7099 { PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0 },
7100 { PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0 },
7101 { PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0 },
7102 { PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0 },
7103 { PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0 },
7104 { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
7105 { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
7106 { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
7107 { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
7108 { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
7109 { PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0 },
7110 { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
7111 { PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0 },
7112 { PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0 },
7113 { PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0 },
7114 { PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0 },
7115 { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
7116 { PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0 },
7117 { PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0 },
7118 { PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0 },
7119 { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
7120 { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
7121 { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
7122 { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
7123 { PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0 },
7124 { PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0 },
7125 { PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0 },
7126 { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
7127 { PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0 },
7128 { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
7129 { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
7130 { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
7131 { PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0 },
7132 { PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0 },
7133 { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
7134 { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
7135 { PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0 },
7136 { PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0 },
7137 { PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0 },
7138 { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
7139 { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
7140 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
7141 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
7142 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
7143 { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
7144 { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
7145 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7146 { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
7147 { PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0 },
7148 { PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0 },
7149 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
7150 { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
7151 { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
7152 { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
7153 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
7154 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
7155 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
7156 { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
7157 { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
7158 { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
7159 { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
7160 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
7161 { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
7162 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
7163 { PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0 },
7164 { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
7165 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
7166 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
7167 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
7168 { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
7169 { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
7170 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7171 { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
7172 { PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0 },
7173 { PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0 },
7174 { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
7175 { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
7176 { PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0 },
7177 { PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0 },
7178 { PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0 },
7179 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
7180 { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
7181 { PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0 },
7182 { PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0 },
7183 { PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0 },
7184 { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
7185 { PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0 },
7186 { PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0 },
7187 { PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0 },
7188 { PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0 },
7189 { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
7190 { PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0 },
7191 { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
7192 { PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0 },
7193 { PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0 },
7194 { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
7195 { PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0 },
7196 { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
7197 { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
7198 { PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0 },
7199 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
7200 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
7201 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
7202 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
7203 { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
7204 { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
7205 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
7206 { PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0 },
7207 { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
7208 { PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0 },
7209 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
7210 { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
7211 { PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0 },
7212 { PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0 },
7213 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
7214 { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
7215 { PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0 },
7216 { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
7217 { PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0 },
7218 { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
7219 { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
7220 { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
7221 { PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0 },
7222 { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
7223 { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
7224 { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
7225 { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
7226 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7227 { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
7228 { PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0 },
7229 { PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0 },
7230 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
7231 { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
7232 { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
7233 { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
7234 { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
7235 { PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0 },
7236 { PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0 },
7237 { PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0 },
7238 { PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0 },
7239 { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
7240 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
7241 { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
7242 { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
7243 { PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0 },
7244 { PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0 },
7245 { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
7246 { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
7247 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
7248 { PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0 },
7249 { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
7250 { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
7251 { PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0 },
7252 { PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0 },
7253 { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
7254 { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
7255 { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
7256 { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
7257 { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
7258 { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
7259 { PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0 },
7260 { PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0 },
7261 { PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0 },
7262 { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
7263 { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
7264 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
7265 { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
7266 { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
7267 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
7268 { PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0 },
7269 { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
7270 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
7271 { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
7272 { PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0 },
7273 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7274 { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
7275 { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
7276 { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
7277 { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
7278 { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
7279 { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
7280 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
7281 { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
7282 { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
7283 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
7284 { PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0 },
7285 { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
7286 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
7287 { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
7288 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
7289 { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
7290 { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
7291 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7292 { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
7293 { PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0 },
7294 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
7295 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
7296 { PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0 },
7297 { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
7298 { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
7299 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
7300 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
7301 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7302 { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
7303 { PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0 },
7304 { PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0 },
7305 { PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0 },
7306 { PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0 },
7307 { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
7308 { PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0 },
7309 { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
7310 { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
7311 { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
7312 { PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0 },
7313 { PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0 },
7314 { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
7315 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
7316 { PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0 },
7317 { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
7318 { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
7319 { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
7320 { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
7321 { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
7322 { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
7323 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
7324 { PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0 },
7325 { PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0 },
7326 { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
7327 { PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0 },
7328 { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
7329 { PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0 },
7330 { PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0 },
7331 { PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0 },
7332 { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
7333 { PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0 },
7334 { PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0 },
7335 { PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0 },
7336 { PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0 },
7337 { PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0 },
7338 { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
7339 { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
7340 { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
7341 { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
7342 { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
7343 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
7344 { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
7345 { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
7346 { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
7347 { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
7348 { PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0 },
7349 { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
7350 { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
7351 { PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0 },
7352 { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
7353 { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
7354 { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
7355 { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
7356 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
7357 { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
7358 { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
7359 { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
7360 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
7361 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
7362 { PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0 },
7363 { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
7364 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
7365 { PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0 },
7366 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7367 { PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0 },
7368 { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
7369 { PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0 },
7370 { PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0 },
7371 { PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0 },
7372 { PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0 },
7373 { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
7374 { PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0 },
7375 { PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0 },
7376 { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
7377 { PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0 },
7378 { PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0 },
7379 { PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0 },
7380 { PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0 },
7381 { PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0 },
7382 { PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0 },
7383 { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
7384 { PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0 },
7385 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7386 { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
7387 { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
7388 { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
7389 { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
7390 { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
7391 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
7392 { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
7393 { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
7394 { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
7395 { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
7396 { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
7397 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
7398 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
7399 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
7400 { PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0 },
7401 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7402 { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
7403 { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
7404 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7405 { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
7406 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
7407 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7408 { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
7409 { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
7410 { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
7411 { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
7412 { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
7413 { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
7414 { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
7415 { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
7416 { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
7417 { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
7418 { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
7419 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
7420 { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
7421 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
7422 { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
7423 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
7424 { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
7425 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7426 { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
7427 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
7428 { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
7429 { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
7430 { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
7431 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
7432 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7433 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7434 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7435 { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
7436 { PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0 },
7437 { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
7438 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
7439 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7440 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7441 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7442 { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
7443 { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
7444 { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
7445 { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
7446 { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
7447 { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
7448 { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
7449 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
7450 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
7451 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
7452 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
7453 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7454 { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
7455 { PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0 },
7456 { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
7457 { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
7458 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
7459 { PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0 },
7460 { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
7461 { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
7462 { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
7463 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
7464 { PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0 },
7465 { PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0 },
7466 { PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0 },
7467 { PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0 },
7468 { PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0 },
7469 { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
7470 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
7471 { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
7472 { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
7473 { PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0 },
7474 { PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0 },
7475 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
7476 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
7477 { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
7478 { PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0 },
7479 { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
7480 { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
7481 { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
7482 { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
7483 { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
7484 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
7485 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
7486 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
7487 { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
7488 { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
7489 { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
7490 { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
7491 { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
7492 { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
7493 { PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0 },
7494 { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
7495 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
7496 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
7497 { PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0 },
7498 { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
7499 { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
7500 { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
7501 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
7502 { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
7503 { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
7504 { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
7505 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
7506 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
7507 { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
7508 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
7509 { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
7510 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
7511 { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
7512 { PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0 },
7513 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7514 { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
7515 { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
7516 { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
7517 { PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0 },
7518 { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
7519 { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
7520 { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
7521 { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
7522 { PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0 },
7523 { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
7524 { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
7525 { PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0 },
7526 { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
7527 { PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0 },
7528 { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
7529 { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
7530 { PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0 },
7531 { PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0 },
7532 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
7533 { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
7534 { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
7535 { PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0 },
7536 { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
7537 { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
7538 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
7539 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
7540 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
7541 { PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0 },
7542 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
7543 { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
7544 { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
7545 { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
7546 { PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0 },
7547 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
7548 { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
7549 { PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0 },
7550 { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
7551 { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
7552 { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
7553 { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
7554 { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
7555 { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
7556 { PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0 },
7557 { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
7558 { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
7559 { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
7560 { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
7561 { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
7562 { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
7563 { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
7564 { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
7565 { PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0 },
7566 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7567 { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
7568 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
7569 { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
7570 { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
7571 { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
7572 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
7573 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7574 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7575 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7576 { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
7577 { PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0 },
7578 { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
7579 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
7580 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7581 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7582 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7583 { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
7584 { PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0 },
7585 { PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0 },
7586 { PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0 },
7587 { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
7588 { PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0 },
7589 { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
7590 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
7591 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
7592 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
7593 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
7594 { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
7595 { PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0 },
7596 { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
7597 { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
7598 { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
7599 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
7600 { PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0 },
7601 { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
7602 { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
7603 { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
7604 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
7605 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
7606 { PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0 },
7607 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
7608 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
7609 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
7610 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
7611 { PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0 },
7612 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
7613 { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
7614 { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
7615 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
7616 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
7617 { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
7618 { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
7619 { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
7620 { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
7621 { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
7622 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
7623 { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
7624 { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
7625 { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
7626 { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
7627 { PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0 },
7628 { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
7629 { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
7630 { PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0 },
7631 { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
7632 { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
7633 { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
7634 { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
7635 { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
7636 { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
7637 { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
7638 { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
7639 { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
7640 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
7641 { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
7642 { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
7643 { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
7644 { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
7645 { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
7646 { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
7647 { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
7648 { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
7649 { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
7650 { PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0 },
7651 { PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0 },
7652 { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
7653 { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
7654 { PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0 },
7655 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7656 { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
7657 { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
7658 { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
7659 { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
7660 { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
7661 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
7662 { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
7663 { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
7664 { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
7665 { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
7666 { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
7667 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
7668 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
7669 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
7670 { PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0 },
7671 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
7672 { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
7673 { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
7674 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7675 { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
7676 { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
7677 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7678 { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
7679 { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
7680 { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
7681 { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
7682 { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
7683 { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
7684 { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
7685 { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
7686 { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
7687 { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
7688 { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
7689 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
7690 { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
7691 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
7692 { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
7693 { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
7694 { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
7695 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7696 { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
7697 { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
7698 { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
7699 { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
7700 { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
7701 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
7702 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7703 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7704 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7705 { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
7706 { PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0 },
7707 { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
7708 { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
7709 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7710 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7711 { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
7712 { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
7713 { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
7714 { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
7715 { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
7716 { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
7717 { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
7718 { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
7719 { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
7720 { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
7721 { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
7722 { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
7723 { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
7724 { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
7725 { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
7726 { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
7727 { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
7728 { PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0 },
7729 { PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0 },
7730 { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
7731 { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
7732 { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
7733 { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
7734 { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
7735 { PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0 },
7736 { PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0 },
7737 { PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0 },
7738 { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
7739 { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
7740 { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
7741 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
7742 { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
7743 { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
7744 { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
7745 { PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0 },
7746 { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
7747 { PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0 },
7748 { PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0 },
7749 { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
7750 { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
7751 { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
7752 { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
7753 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
7754 { PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0 },
7755 { PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0 },
7756 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
7757 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
7758 { PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0 },
7759 { PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0 },
7760 { PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0 },
7761 { PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0 },
7762 { PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0 },
7763 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7764 { PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0 },
7765 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
7766 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
7767 { PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0 },
7768 { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
7769 { PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0 },
7770 { PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0 },
7771 { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
7772 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
7773 { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
7774 { PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0 },
7775 { PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0 },
7776 { PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0 },
7777 { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
7778 { PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0 },
7779 { PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0 },
7780 { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
7781 { PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0 },
7782 { PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0 },
7783 { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
7784 { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
7785 { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
7786 { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
7787 { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
7788 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7789 { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
7790 { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
7791 { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
7792 { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
7793 { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
7794 { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
7795 { PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0 },
7796 { PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0 },
7797 { PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0 },
7798 { PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0 },
7799 { PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0 },
7800 { PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0 },
7801 { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
7802 { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
7803 { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
7804 { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
7805 { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
7806 { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
7807 { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
7808 { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
7809 { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
7810 { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
7811 { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
7812 { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
7813 { PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0 },
7814 { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
7815 { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
7816 { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
7817 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7818 { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
7819 { PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0 },
7820 { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
7821 { PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0 },
7822 { PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0 },
7823 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
7824 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
7825 { PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0 },
7826 { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
7827 { PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0 },
7828 { PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0 },
7829 { PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0 },
7830 { PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0 },
7831 { PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0 },
7832 { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
7833 { PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0 },
7834 { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
7835 { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
7836 { PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0 },
7837 { PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0 },
7838 { PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0 },
7839 { PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0 },
7840 { PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0 },
7841 { PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0 },
7842 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7843 { PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0 },
7844 { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
7845 { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
7846 { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
7847 { PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0 },
7848 { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
7849 { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
7850 { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
7851 { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
7852 { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
7853 { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
7854 { PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0 },
7855 { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
7856 { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
7857 { PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0 },
7858 { PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0 },
7859 { PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0 },
7860 { PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0 },
7861 { PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0 },
7862 { PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0 },
7863 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7864 { PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0 },
7865 { PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0 },
7866 { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
7867 { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
7868 { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
7869 { PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0 },
7870 { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
7871 { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
7872 { PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0 },
7873 { PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0 },
7874 { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
7875 { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
7876 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7877 { PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0 },
7878 { PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0 },
7879 { PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0 },
7880 { PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0 },
7881 { PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0 },
7882 { PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0 },
7883 { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
7884 { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
7885 { PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0 },
7886 { PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0 },
7887 { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
7888 { PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0 },
7889 { PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0 },
7890 { PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0 },
7891 { PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0 },
7892 { PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0 },
7893 { PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0 },
7894 { PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0 },
7895 { PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0 },
7896 { PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0 },
7897 { PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0 },
7898 { PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0 },
7899 { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
7900 { PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0 },
7901 { PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0 },
7902 { PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0 },
7903 { PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0 },
7904 { PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0 },
7905 { PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0 },
7906 { PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0 },
7907 { PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0 },
7908 { PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0 },
7909 { PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0 },
7910 { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
7911 { PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0 },
7912 { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
7913 { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
7914 { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
7915 { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
7916 { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
7917 { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
7918 { PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0 },
7919 { PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0 },
7920 { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
7921 { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
7922 { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
7923 { PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0 },
7924 { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
7925 { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
7926 { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
7927 { PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0 },
7928 { PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0 },
7929 { PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0 },
7930 { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
7931 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
7932 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
7933 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
7934 { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
7935 { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
7936 { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
7937 { PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0 },
7938 { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
7939 { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
7940 { PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0 },
7941 { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
7942 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7943 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7944 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7945 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7946 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7947 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7948 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7949 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7950 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7951 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
7952 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
7953 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
7954 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
7955 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
7956 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
7957 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
7958 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
7959 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
7960 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
7961 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
7962 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
7963 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
7964 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
7965 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
7966 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
7967 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
7968 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
7969 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
7970 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
7971 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
7972 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
7973 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
7974 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
7975 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
7976 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
7977 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
7978 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
7979 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
7980 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
7981 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
7982 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
7983 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
7984 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
7985 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
7986 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
7987 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
7988 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
7989 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
7990 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
7991 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
7992 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
7993 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
7994 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
7995 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
7996 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
7997 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
7998 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
7999 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8000 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8001 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8002 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8003 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8004 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8005 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8006 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8007 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8008 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8009 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8010 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8011 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8012 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8013 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8014 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8015 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8016 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8017 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8018 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8019 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8020 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8021 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8022 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8023 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8024 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8025 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8026 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8027 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8028 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8029 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8030 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8031 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8032 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8033 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8034 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8035 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8036 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8037 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8038 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8039 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8040 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8041 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8042 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8043 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8044 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8045 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8046 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8047 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8048 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8049 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8050 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8051 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8052 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8053 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8054 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8055 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8056 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8057 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8058 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8059 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8060 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8061 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8062 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8063 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8064 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8065 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8066 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8067 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8068 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8069 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8070 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8071 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8072 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8073 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8074 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8075 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8076 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8077 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8078 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8079 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8080 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8081 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8082 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8083 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8084 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8085 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8086 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8087 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8088 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8089 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8090 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8091 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8092 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8093 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8094 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8095 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8096 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8097 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8098 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8099 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8100 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8101 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8102 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8103 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8104 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8105 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8106 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8107 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8108 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8109 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8110 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8111 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8112 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8113 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8114 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8115 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8116 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8117 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8118 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8119 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8120 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8121 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8122 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8123 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8124 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8125 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8126 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8127 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8128 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8129 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8130 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8131 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8132 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8133 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8134 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8135 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8136 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8137 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8138 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8139 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8140 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8141 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8142 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8143 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8144 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8145 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8146 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8147 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8148 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8149 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8150 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8151 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8152 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8153 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8154 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8155 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8156 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8157 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8158 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8159 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8160 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8161 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8162 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8163 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8164 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8165 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8166 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8167 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8168 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8169 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8170 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8171 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8172 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8173 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8174 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8175 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8176 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8177 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8178 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8179 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8180 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8181 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8182 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8183 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8184 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8185 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8186 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8187 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8188 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8189 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8190 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8191 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8192 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8193 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8194 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8195 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8196 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8197 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8198 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8199 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8200 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8201 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8202 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8203 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8204 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8205 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8206 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8207 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8208 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8209 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8210 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8211 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8212 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8213 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8214 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8215 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8216 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8217 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8218 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8219 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8220 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8221 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8222 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8223 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8224 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8225 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8226 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8227 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8228 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8229 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8230 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8231 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8232 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8233 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8234 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8235 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8236 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8237 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8238 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8239 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8240 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8241 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8242 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8243 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8244 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8245 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8246 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8247 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8248 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8249 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8250 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8251 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8252 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8253 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8254 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8255 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8256 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8257 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8258 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8259 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8260 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8261 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8262 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8263 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8264 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8265 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8266 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8267 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8268 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8269 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8270 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8271 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8272 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8273 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8274 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8275 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8276 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8277 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8278 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8279 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8280 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8281 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8282 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8283 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8284 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8285 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8286 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8287 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8288 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8289 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8290 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8291 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8292 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8293 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8294 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8295 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8296 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8297 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8298 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8299 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8300 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8301 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8302 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8303 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8304 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8305 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8306 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8307 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8308 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8309 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8310 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8311 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8312 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8313 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8314 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8315 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8316 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8317 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8318 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8319 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8320 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8321 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8322 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8323 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8324 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8325 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8326 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8327 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8328 { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
8329 { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
8330 { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
8331 { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
8332 { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
8333 { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
8334 { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
8335 { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
8336 { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
8337 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8338 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8339 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8340 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8341 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8342 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8343 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8344 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8345 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8346 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8347 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8348 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8349 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8350 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8351 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8352 { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
8353 { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
8354 { PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO },
8355 { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
8356 { PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0 },
8357 { PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0 },
8358 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8359 { PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0 },
8360 { PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0 },
8361 { PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0 },
8362 { PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0 },
8363 { PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0 },
8364 { PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0 },
8365 { PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0 },
8366 { PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0 },
8367 { PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0 },
8368 { PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0 },
8369 { PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0 },
8370 { PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0 },
8371 { PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0 },
8372 { PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0 },
8373 { PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0 },
8374 { PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0 },
8375 { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
8376 { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
8377 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8378 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8379 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8380 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8381 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8382 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8383 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8384 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8385 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8386 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8387 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8388 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8389 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8390 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8391 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8392 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8393 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8394 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8395 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8396 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8397 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8398 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8399 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8400 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8401 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8402 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8403 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8404 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8405 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8406 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8407 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8408 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8409 { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
8410 { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
8411 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8412 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8413 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8414 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8415 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8416 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8417 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8418 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8419 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8420 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8421 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8422 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8423 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8424 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8425 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8426 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8427 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8428 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8429 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8430 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8431 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8432 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8433 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8434 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8435 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8436 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8437 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8438 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8439 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8440 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8441 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8442 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8443 { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
8444 { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
8445 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8446 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8447 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8448 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8449 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8450 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8451 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8452 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8453 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8454 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8455 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8456 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8457 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8458 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8459 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8460 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8461 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8462 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8463 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8464 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8465 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8466 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8467 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8468 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8469 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8470 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8471 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8472 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8473 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8474 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8475 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8476 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8477 { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
8478 { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
8479 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8480 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8481 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8482 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8483 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8484 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8485 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8486 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8487 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8488 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8489 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8490 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8491 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8492 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8493 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8494 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8495 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8496 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8497 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8498 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8499 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8500 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8501 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8502 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8503 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8504 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8505 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8506 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8507 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8508 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8509 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8510 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8511 { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
8512 { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
8513 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8514 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8515 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8516 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8517 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8518 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8519 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8520 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8521 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8522 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8523 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8524 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8525 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8526 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8527 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8528 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8529 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8530 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8531 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8532 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8533 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8534 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8535 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8536 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8537 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8538 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8539 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8540 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8541 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8542 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8543 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8544 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8545 { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
8546 { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
8547 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8548 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8549 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8550 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8551 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8552 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8553 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8554 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8555 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8556 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8557 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8558 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8559 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8560 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8561 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8562 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8563 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8564 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8565 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8566 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8567 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8568 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8569 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8570 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8571 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8572 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8573 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8574 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8575 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8576 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8577 { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
8578 { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
8579 { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
8580 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8581 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8582 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8583 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8584 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8585 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8586 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8587 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8588 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8589 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8590 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8591 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8592 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8593 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8594 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8595 { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
8596 { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
8597 { PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO },
8598 { PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0 },
8599 { PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0 },
8600 { PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0 },
8601 { PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0 },
8602 { PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0 },
8603 { PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0 },
8604 { PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0 },
8605 { PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0 },
8606 { PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0 },
8607 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8608 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8609 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8610 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8611 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8612 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8613 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8614 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8615 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8616 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8617 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8618 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8619 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8620 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8621 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8622 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8623 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8624 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8625 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8626 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8627 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8628 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8629 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8630 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8631 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8632 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8633 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8634 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8635 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8636 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8637 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8638 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8639 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8640 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8641 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8642 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8643 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8644 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8645 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8646 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8647 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8648 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8649 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8650 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8651 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8652 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8653 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8654 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8655 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8656 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8657 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8658 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8659 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8660 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8661 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8662 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8663 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8664 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8665 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8666 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8667 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8668 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8669 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8670 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8671 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8672 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8673 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8674 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8675 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8676 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8677 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8678 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8679 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8680 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8681 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8682 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8683 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8684 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8685 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8686 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8687 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8688 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8689 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8690 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8691 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8692 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8693 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8694 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8695 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8696 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8697 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8698 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8699 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8700 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8701 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8702 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8703 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8704 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8705 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8706 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8707 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8708 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8709 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8710 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8711 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8712 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8713 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8714 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8715 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8716 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8717 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8718 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8719 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8720 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8721 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8722 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8723 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8724 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8725 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8726 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8727 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8728 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8729 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8730 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8731 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8732 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8733 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8734 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8735 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8736 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8737 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8738 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8739 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8740 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8741 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8742 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8743 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8744 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8745 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8746 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8747 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8748 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8749 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8750 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8751 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8752 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8753 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8754 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8755 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8756 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8757 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8758 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8759 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8760 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8761 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8762 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8763 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8764 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8765 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8766 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8767 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8768 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8769 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8770 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8771 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8772 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8773 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8774 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8775 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8776 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8777 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8778 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8779 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8780 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8781 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8782 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8783 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8784 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8785 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8786 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8787 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8788 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8789 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8790 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8791 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8792 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8793 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8794 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8795 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8796 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8797 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8798 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8799 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8800 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8801 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8802 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8803 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8804 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8805 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8806 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8807 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8808 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8809 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8810 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8811 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8812 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8813 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8814 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8815 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8816 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8817 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8818 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8819 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8820 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8821 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8822 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8823 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8824 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8825 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8826 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8827 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8828 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8829 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8830 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8831 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8832 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8833 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8834 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8835 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8836 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8837 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8838 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8839 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8840 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8841 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8842 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8843 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8844 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8845 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8846 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8847 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8848 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8849 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8850 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8851 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8852 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8853 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8854 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8855 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8856 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8857 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8858 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8859 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8860 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8861 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8862 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8863 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8864 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8865 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8866 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8867 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8868 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8869 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8870 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8871 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8872 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8873 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8874 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8875 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8876 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8877 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8878 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8879 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8880 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8881 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8882 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8883 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8884 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8885 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8886 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8887 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8888 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8889 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8890 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8891 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8892 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8893 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8894 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8895 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8896 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8897 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8898 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8899 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8900 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8901 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8902 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8903 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8904 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8905 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8906 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8907 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8908 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8909 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8910 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8911 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8912 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8913 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8914 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8915 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8916 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8917 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8918 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8919 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8920 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8921 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8922 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8923 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8924 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8925 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8926 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8927 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8928 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8929 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8930 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8931 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8932 { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
8933 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8934 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8935 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8936 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8937 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8938 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8939 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8940 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8941 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8942 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8943 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8944 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8945 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8946 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8947 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8948 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8949 { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
8950 { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
8951 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8952 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8953 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8954 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8955 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8956 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8957 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8958 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8959 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8960 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8961 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8962 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8963 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8964 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8965 { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
8966 { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
8967 { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
8968 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8969 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8970 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8971 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8972 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8973 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8974 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8975 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8976 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8977 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8978 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8979 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8980 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8981 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8982 { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
8983 { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
8984 { PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO },
8985 { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
8986 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8987 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8988 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8989 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8990 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8991 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8992 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8993 { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
8994 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
8995 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
8996 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
8997 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
8998 { PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0 },
8999 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9000 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9001 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9002 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9003 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9004 { PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0 },
9005 { PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0 },
9006 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9007 { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
9008 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9009 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9010 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9011 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9012 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9013 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9014 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9015 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9016 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9017 { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
9018 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9019 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9020 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9021 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9022 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9023 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9024 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9025 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9026 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9027 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9028 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9029 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9030 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9031 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9032 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9033 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9034 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9035 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9036 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9037 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9038 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9039 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9040 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9041 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9042 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9043 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9044 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9045 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9046 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9047 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9048 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9049 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9050 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9051 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9052 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9053 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9054 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9055 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9056 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9057 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9058 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9059 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9060 { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
9061 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9062 { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
9063 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9064 { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
9065 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9066 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9067 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9068 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9069 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9070 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9071 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9072 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9073 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9074 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9075 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9076 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9077 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9078 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9079 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9080 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9081 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9082 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9083 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9084 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9085 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9086 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9087 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9088 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9089 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9090 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9091 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9092 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9093 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9094 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9095 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9096 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9097 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9098 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9099 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9100 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9101 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9102 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9103 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9104 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9105 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9106 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9107 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9108 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9109 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9110 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9111 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9112 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9113 { PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0 },
9114 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9115 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9116 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9117 { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
9118 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9119 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9120 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9121 { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
9122 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9123 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9124 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9125 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9126 { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
9127 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9128 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9129 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9130 { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
9131 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9132 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9133 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9134 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9135 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9136 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9137 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9138 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9139 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9140 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9141 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9142 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9143 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9144 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9145 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9146 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9147 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9148 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9149 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9150 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9151 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9152 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9153 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9154 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9155 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9156 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9157 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9158 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9159 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9160 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9161 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9162 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9163 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9164 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9165 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9166 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9167 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9168 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9169 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9170 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9171 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9172 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9173 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9174 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9175 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9176 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9177 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9178 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9179 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9180 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9181 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9182 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9183 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9184 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9185 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9186 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9187 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9188 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9189 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9190 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9191 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9192 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9193 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9194 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9195 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9196 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9197 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9198 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9199 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9200 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9201 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9202 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9203 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9204 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9205 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9206 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9207 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9208 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9209 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9210 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9211 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9212 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9213 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9214 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9215 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9216 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9217 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9218 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9219 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9220 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9221 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9222 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9223 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9224 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9225 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9226 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9227 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9228 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9229 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9230 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9231 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9232 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9233 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9234 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9235 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9236 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9237 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9238 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9239 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9240 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9241 { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
9242 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9243 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9244 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9245 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9246 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9247 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9248 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9249 { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
9250 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9251 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9252 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9253 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9254 { PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0 },
9255 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9256 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9257 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9258 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9259 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9260 { PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0 },
9261 { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
9262 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9263 { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
9264 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9265 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9266 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9267 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9268 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9269 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9270 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9271 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9272 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9273 { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
9274 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9275 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9276 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9277 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9278 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9279 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9280 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9281 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9282 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9283 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9284 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9285 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9286 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9287 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9288 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9289 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9290 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9291 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9292 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9293 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9294 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9295 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9296 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9297 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9298 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9299 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9300 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9301 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9302 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9303 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9304 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9305 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9306 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9307 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9308 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9309 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9310 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9311 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9312 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9313 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9314 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9315 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9316 { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
9317 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9318 { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
9319 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9320 { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
9321 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9322 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9323 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9324 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9325 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9326 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9327 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9328 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9329 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9330 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9331 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9332 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9333 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9334 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9335 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9336 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9337 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9338 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9339 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9340 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9341 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9342 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9343 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9344 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9345 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9346 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9347 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9348 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9349 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9350 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9351 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9352 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9353 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9354 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9355 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9356 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9357 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9358 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9359 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9360 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9361 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9362 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9363 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9364 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9365 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9366 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9367 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9368 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9369 { PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0 },
9370 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9371 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9372 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9373 { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
9374 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9375 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9376 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9377 { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
9378 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9379 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9380 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9381 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9382 { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
9383 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9384 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9385 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9386 { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
9387 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9388 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9389 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9390 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9391 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9392 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9393 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9394 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9395 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9396 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9397 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9398 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9399 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9400 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9401 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9402 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9403 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9404 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9405 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9406 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9407 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9408 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9409 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9410 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9411 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9412 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9413 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9414 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9415 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9416 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9417 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9418 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9419 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9420 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9421 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9422 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9423 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9424 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9425 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9426 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9427 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9428 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9429 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9430 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9431 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9432 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9433 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9434 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9435 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9436 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9437 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9438 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9439 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9440 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9441 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9442 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9443 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9444 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9445 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9446 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9447 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9448 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9449 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9450 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9451 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9452 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9453 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9454 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9455 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9456 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9457 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9458 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9459 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9460 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9461 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9462 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9463 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9464 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9465 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9466 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9467 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9468 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9469 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9470 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9471 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9472 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9473 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9474 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9475 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9476 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9477 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9478 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9479 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9480 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9481 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9482 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9483 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9484 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9485 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9486 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9487 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9488 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9489 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9490 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9491 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9492 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9493 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9494 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9495 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9496 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9497 { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
9498 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9499 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9500 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9501 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9502 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9503 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9504 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9505 { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
9506 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9507 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9508 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9509 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9510 { PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0 },
9511 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9512 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9513 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9514 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9515 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9516 { PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0 },
9517 { PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0 },
9518 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9519 { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
9520 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9521 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9522 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9523 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9524 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9525 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9526 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9527 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9528 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9529 { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
9530 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9531 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9532 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9533 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9534 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9535 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9536 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9537 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9538 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9539 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9540 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9541 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9542 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9543 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9544 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9545 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9546 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9547 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9548 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9549 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9550 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9551 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9552 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9553 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9554 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9555 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9556 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9557 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9558 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9559 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9560 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9561 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9562 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9563 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9564 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9565 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9566 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9567 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9568 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9569 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9570 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9571 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9572 { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
9573 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9574 { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
9575 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9576 { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
9577 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9578 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9579 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9580 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9581 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9582 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9583 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9584 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9585 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9586 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9587 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9588 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9589 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9590 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9591 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9592 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9593 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9594 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9595 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9596 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9597 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9598 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9599 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9600 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9601 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9602 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9603 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9604 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9605 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9606 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9607 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9608 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9609 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9610 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9611 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9612 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9613 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9614 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9615 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9616 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9617 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9618 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9619 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9620 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9621 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9622 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9623 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9624 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9625 { PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0 },
9626 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9627 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9628 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9629 { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
9630 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9631 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9632 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9633 { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
9634 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9635 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9636 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9637 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9638 { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
9639 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9640 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9641 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9642 { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
9643 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9644 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9645 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9646 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9647 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9648 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9649 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9650 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9651 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9652 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9653 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9654 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9655 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9656 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9657 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9658 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9659 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9660 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9661 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9662 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9663 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9664 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9665 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9666 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9667 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9668 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9669 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9670 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9671 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9672 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9673 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9674 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9675 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9676 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9677 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9678 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9679 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9680 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9681 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9682 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9683 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9684 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9685 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9686 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9687 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9688 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9689 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9690 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9691 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9692 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9693 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9694 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9695 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9696 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9697 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9698 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9699 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9700 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9701 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9702 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9703 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9704 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9705 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9706 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9707 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9708 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9709 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9710 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9711 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9712 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9713 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9714 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9715 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9716 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9717 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9718 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9719 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9720 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9721 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9722 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9723 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9724 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9725 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9726 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9727 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9728 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9729 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9730 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9731 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9732 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9733 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9734 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9735 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9736 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9737 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9738 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9739 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9740 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9741 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9742 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9743 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9744 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9745 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9746 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9747 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9748 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9749 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9750 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9751 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9752 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9753 { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
9754 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9755 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9756 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9757 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9758 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9759 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9760 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9761 { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
9762 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9763 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9764 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9765 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9766 { PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0 },
9767 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9768 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9769 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9770 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9771 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9772 { PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0 },
9773 { PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0 },
9774 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9775 { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
9776 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9777 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9778 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9779 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9780 { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
9781 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9782 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9783 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9784 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9785 { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
9786 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9787 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9788 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9789 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9790 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9791 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9792 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9793 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9794 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9795 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9796 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9797 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9798 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9799 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9800 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9801 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9802 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9803 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9804 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9805 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9806 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9807 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9808 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9809 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9810 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9811 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9812 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9813 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9814 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9815 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9816 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9817 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9818 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9819 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9820 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9821 { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
9822 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9823 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9824 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9825 { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
9826 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9827 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9828 { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
9829 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9830 { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
9831 { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
9832 { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
9833 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9834 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9835 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9836 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9837 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9838 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9839 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9840 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9841 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9842 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9843 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9844 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9845 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9846 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9847 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9848 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9849 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9850 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9851 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9852 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9853 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9854 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9855 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9856 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9857 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9858 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9859 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9860 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9861 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9862 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9863 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9864 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9865 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9866 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9867 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9868 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9869 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9870 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9871 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9872 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9873 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9874 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9875 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9876 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9877 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9878 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9879 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9880 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9881 { PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0 },
9882 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9883 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9884 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9885 { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
9886 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9887 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9888 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9889 { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
9890 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9891 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9892 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9893 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9894 { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
9895 { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
9896 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9897 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9898 { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
9899 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9900 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9901 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9902 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9903 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9904 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9905 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9906 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9907 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9908 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9909 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9910 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9911 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9912 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9913 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9914 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9915 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9916 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9917 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9918 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9919 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9920 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9921 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9922 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9923 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9924 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9925 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9926 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9927 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9928 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9929 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9930 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9931 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9932 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9933 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9934 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9935 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9936 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9937 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9938 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9939 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9940 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9941 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9942 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9943 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9944 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9945 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9946 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9947 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9948 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9949 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9950 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9951 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9952 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9953 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9954 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9955 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9956 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9957 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9958 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9959 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9960 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9961 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9962 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9963 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9964 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9965 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9966 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9967 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9968 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9969 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9970 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9971 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9972 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9973 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9974 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9975 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9976 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9977 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9978 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9979 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9980 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9981 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9982 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9983 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9984 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9985 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9986 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9987 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9988 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9989 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9990 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9991 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9992 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9993 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9994 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9995 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9996 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9997 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9998 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
9999 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10000 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10001 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10002 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10003 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10004 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10005 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10006 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10007 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10008 { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
10009 { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
10010 { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
10011 { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
10012 { PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR },
10013 { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
10014 { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
10015 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
10016 { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
10017 { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
10018 { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
10019 { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
10020 { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
10021 { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
10022 { PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR },
10023 { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
10024 { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
10025 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
10026 { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
10027 { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
10028 { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
10029 { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
10030 { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
10031 { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
10032 { PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR },
10033 { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
10034 { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
10035 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
10036 { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
10037 { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
10038 { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
10039 { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
10040 { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
10041 { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
10042 { PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR },
10043 { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
10044 { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
10045 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
10046 { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
10047 { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
10048 { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
10049 { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
10050 { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
10051 { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
10052 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10053 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10054 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10055 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10056 { PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL },
10057 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10058 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10059 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10060 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10061 { PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL },
10062 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10063 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10064 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10065 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10066 { PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL },
10067 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10068 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10069 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10070 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10071 { PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL },
10072 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10073 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10074 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10075 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10076 { PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL },
10077 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10078 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10079 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10080 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10081 { PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL },
10082 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10083 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10084 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10085 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10086 { PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL },
10087 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10088 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10089 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10090 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10091 { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
10092 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10093 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10094 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10095 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10096 { PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL },
10097 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10098 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10099 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10100 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10101 { PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL },
10102 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10103 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10104 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10105 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10106 { PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL },
10107 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10108 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10109 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10110 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10111 { PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL },
10112 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10113 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10114 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10115 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10116 { PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL },
10117 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10118 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10119 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10120 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10121 { PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL },
10122 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10123 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10124 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10125 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10126 { PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL },
10127 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10128 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10129 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10130 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10131 { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
10132 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10133 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10134 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10135 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10136 { PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL },
10137 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10138 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10139 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10140 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10141 { PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL },
10142 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10143 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10144 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10145 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10146 { PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL },
10147 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10148 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10149 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10150 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10151 { PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL },
10152 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10153 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10154 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10155 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10156 { PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL },
10157 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10158 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10159 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10160 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10161 { PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL },
10162 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10163 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10164 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10165 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10166 { PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL },
10167 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10168 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10169 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10170 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10171 { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
10172 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10173 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10174 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10175 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10176 { PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL },
10177 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10178 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10179 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10180 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10181 { PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL },
10182 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10183 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10184 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10185 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10186 { PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL },
10187 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10188 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10189 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10190 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10191 { PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL },
10192 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10193 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10194 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10195 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10196 { PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL },
10197 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10198 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10199 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10200 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10201 { PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL },
10202 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10203 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10204 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10205 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10206 { PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL },
10207 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10208 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10209 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10210 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10211 { PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL },
10212 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10213 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10214 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10215 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10216 { PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL },
10217 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10218 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10219 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10220 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10221 { PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL },
10222 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10223 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10224 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10225 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10226 { PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL },
10227 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10228 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10229 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10230 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10231 { PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL },
10232 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10233 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10234 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10235 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10236 { PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL },
10237 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10238 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10239 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10240 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10241 { PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL },
10242 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10243 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10244 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10245 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10246 { PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL },
10247 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10248 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10249 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10250 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10251 { PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL },
10252 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10253 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10254 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10255 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10256 { PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL },
10257 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10258 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10259 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10260 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10261 { PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL },
10262 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10263 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10264 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10265 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10266 { PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL },
10267 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10268 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10269 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10270 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10271 { PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL },
10272 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10273 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10274 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10275 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10276 { PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL },
10277 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10278 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10279 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10280 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10281 { PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL },
10282 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10283 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10284 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10285 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10286 { PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL },
10287 { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
10288 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
10289 { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
10290 { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
10291 { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
10292 { PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
10293 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
10294 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
10295 { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
10296 { PwrCmdEnd, 0x00000000, 0x00000000 },
10297};
10298
10299#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
new file mode 100644
index 000000000000..91795efe1336
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -0,0 +1,385 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _HARDWARE_MANAGER_H_
24#define _HARDWARE_MANAGER_H_
25
26
27
/*
 * Forward declarations — the full definitions are provided by other
 * powerplay headers; only pointers/enum references are needed here.
 */
28struct pp_hwmgr;
29struct pp_hw_power_state;
30struct pp_power_state;
31enum amd_dpm_forced_level;
32struct PP_TemperatureRange;
33
/*
 * Fan speed limits plus which access modes (percent/RPM, read/write)
 * the fan controller supports.
 */
34struct phm_fan_speed_info {
35	uint32_t min_percent;
36	uint32_t max_percent;
37	uint32_t min_rpm;
38	uint32_t max_rpm;
39	bool supports_percent_read;
40	bool supports_percent_write;
41	bool supports_rpm_read;
42	bool supports_rpm_write;
43};
44
45/* Automatic Power State Throttling */
46enum PHM_AutoThrottleSource
47{
48	PHM_AutoThrottleSource_Thermal,
49	PHM_AutoThrottleSource_External
50};
51
52typedef enum PHM_AutoThrottleSource PHM_AutoThrottleSource;
53
/*
 * Platform capability bits.  Each enumerator is a bit index into the
 * hw_caps/platformCaps bitfield arrays declared below; query and modify
 * individual bits with phm_cap_set()/phm_cap_unset()/phm_cap_enabled().
 */
54enum phm_platform_caps {
55	PHM_PlatformCaps_AtomBiosPpV1 = 0,
56	PHM_PlatformCaps_PowerPlaySupport,
57	PHM_PlatformCaps_ACOverdriveSupport,
58	PHM_PlatformCaps_BacklightSupport,
59	PHM_PlatformCaps_ThermalController,
60	PHM_PlatformCaps_BiosPowerSourceControl,
61	PHM_PlatformCaps_DisableVoltageTransition,
62	PHM_PlatformCaps_DisableEngineTransition,
63	PHM_PlatformCaps_DisableMemoryTransition,
64	PHM_PlatformCaps_DynamicPowerManagement,
65	PHM_PlatformCaps_EnableASPML0s,
66	PHM_PlatformCaps_EnableASPML1,
67	PHM_PlatformCaps_OD5inACSupport,
68	PHM_PlatformCaps_OD5inDCSupport,
69	PHM_PlatformCaps_SoftStateOD5,
70	PHM_PlatformCaps_NoOD5Support,
71	PHM_PlatformCaps_ContinuousHardwarePerformanceRange,
72	PHM_PlatformCaps_ActivityReporting,
73	PHM_PlatformCaps_EnableBackbias,
74	PHM_PlatformCaps_OverdriveDisabledByPowerBudget,
75	PHM_PlatformCaps_ShowPowerBudgetWarning,
76	PHM_PlatformCaps_PowerBudgetWaiverAvailable,
77	PHM_PlatformCaps_GFXClockGatingSupport,
78	PHM_PlatformCaps_MMClockGatingSupport,
79	PHM_PlatformCaps_AutomaticDCTransition,
80	PHM_PlatformCaps_GeminiPrimary,
81	PHM_PlatformCaps_MemorySpreadSpectrumSupport,
82	PHM_PlatformCaps_EngineSpreadSpectrumSupport,
83	PHM_PlatformCaps_StepVddc,
84	PHM_PlatformCaps_DynamicPCIEGen2Support,
85	PHM_PlatformCaps_SMC,
86	PHM_PlatformCaps_FaultyInternalThermalReading,          /* Internal thermal controller reports faulty temperature value when DAC2 is active */
87	PHM_PlatformCaps_EnableVoltageControl,                  /* indicates voltage can be controlled */
88	PHM_PlatformCaps_EnableSideportControl,                 /* indicates Sideport can be controlled */
89	PHM_PlatformCaps_VideoPlaybackEEUNotification,          /* indicates EEU notification of video start/stop is required */
90	PHM_PlatformCaps_TurnOffPll_ASPML1,                     /* PCIE Turn Off PLL in ASPM L1 */
91	PHM_PlatformCaps_EnableHTLinkControl,                   /* indicates HT Link can be controlled by ACPI or CLMC overridden/automated mode. */
92	PHM_PlatformCaps_PerformanceStateOnly,                  /* indicates only performance power state to be used on current system. */
93	PHM_PlatformCaps_ExclusiveModeAlwaysHigh,               /* In Exclusive (3D) mode always stay in High state. */
94	PHM_PlatformCaps_DisableMGClockGating,                  /* to disable Medium Grain Clock Gating or not */
95	PHM_PlatformCaps_DisableMGCGTSSM,                       /* TO disable Medium Grain Clock Gating Shader Complex control */
96	PHM_PlatformCaps_UVDAlwaysHigh,                         /* In UVD mode always stay in High state */
97	PHM_PlatformCaps_DisablePowerGating,                    /* to disable power gating */
98	PHM_PlatformCaps_CustomThermalPolicy,                   /* indicates only performance power state to be used on current system. */
99	PHM_PlatformCaps_StayInBootState,                       /* Stay in Boot State, do not do clock/voltage or PCIe Lane and Gen switching (RV7xx and up). */
100	PHM_PlatformCaps_SMCAllowSeparateSWThermalState,        /* SMC use separate SW thermal state, instead of the default SMC thermal policy. */
101	PHM_PlatformCaps_MultiUVDStateSupport,                  /* Powerplay state table supports multi UVD states. */
102	PHM_PlatformCaps_EnableSCLKDeepSleepForUVD,             /* With HW ECOs, we don't need to disable SCLK Deep Sleep for UVD state. */
103	PHM_PlatformCaps_EnableMCUHTLinkControl,                /* Enable HT link control by MCU */
104	PHM_PlatformCaps_ABM,                                   /* ABM support.*/
105	PHM_PlatformCaps_KongThermalPolicy,                     /* A thermal policy specific for Kong */
106	PHM_PlatformCaps_SwitchVDDNB,                           /* if the users want to switch VDDNB */
107	PHM_PlatformCaps_ULPS,                                  /* support ULPS mode either through ACPI state or ULPS state */
108	PHM_PlatformCaps_NativeULPS,                            /* hardware capable of ULPS state (other than through the ACPI state) */
109	PHM_PlatformCaps_EnableMVDDControl,                     /* indicates that memory voltage can be controlled */
110	PHM_PlatformCaps_ControlVDDCI,                          /* Control VDDCI separately from VDDC. */
111	PHM_PlatformCaps_DisableDCODT,                          /* indicates if DC ODT apply or not */
112	PHM_PlatformCaps_DynamicACTiming,                       /* if the SMC dynamically re-programs MC SEQ register values */
113	PHM_PlatformCaps_EnableThermalIntByGPIO,                /* enable throttle control through GPIO */
114	PHM_PlatformCaps_BootStateOnAlert,                      /* Go to boot state on alerts, e.g. on an AC->DC transition. */
115	PHM_PlatformCaps_DontWaitForVBlankOnAlert,              /* Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). */
116	PHM_PlatformCaps_Force3DClockSupport,                   /* indicates if the platform supports force 3D clock. */
117	PHM_PlatformCaps_MicrocodeFanControl,                   /* Fan is controlled by the SMC microcode. */
118	PHM_PlatformCaps_AdjustUVDPriorityForSP,
119	PHM_PlatformCaps_DisableLightSleep,                     /* Light sleep for evergreen family. */
120	PHM_PlatformCaps_DisableMCLS,                           /* MC Light sleep */
121	PHM_PlatformCaps_RegulatorHot,                          /* Enable throttling on 'regulator hot' events. */
122	PHM_PlatformCaps_BACO,                                  /* Support Bus Alive Chip Off mode */
123	PHM_PlatformCaps_DisableDPM,                            /* Disable DPM, supported from Llano */
124	PHM_PlatformCaps_DynamicM3Arbiter,                      /* support dynamically change m3 arbiter parameters */
125	PHM_PlatformCaps_SclkDeepSleep,                         /* support sclk deep sleep */
126	PHM_PlatformCaps_DynamicPatchPowerState,                /* this ASIC supports to patch power state dynamically */
127	PHM_PlatformCaps_ThermalAutoThrottling,                 /* enabling auto thermal throttling, */
128	PHM_PlatformCaps_SumoThermalPolicy,                     /* A thermal policy specific for Sumo */
129	PHM_PlatformCaps_PCIEPerformanceRequest,                /* support to change RC voltage */
130	PHM_PlatformCaps_BLControlledByGPU,                     /* support varibright */
131	PHM_PlatformCaps_PowerContainment,                      /* support DPM2 power containment (AKA TDP clamping) */
132	PHM_PlatformCaps_SQRamping,                             /* support DPM2 SQ power throttle */
133	PHM_PlatformCaps_CAC,                                   /* support Capacitance * Activity power estimation */
134	PHM_PlatformCaps_NIChipsets,                            /* Northern Island and beyond chipsets */
135	PHM_PlatformCaps_TrinityChipsets,                       /* Trinity chipset */
136	PHM_PlatformCaps_EvergreenChipsets,                     /* Evergreen family chipset */
137	PHM_PlatformCaps_PowerControl,                          /* Cayman and beyond chipsets */
138	PHM_PlatformCaps_DisableLSClockGating,                  /* to disable Light Sleep control for HDP memories */
139	PHM_PlatformCaps_BoostState,                            /* this ASIC supports boost state */
140	PHM_PlatformCaps_UserMaxClockForMultiDisplays,          /* indicates if max memory clock is used for all status when multiple displays are connected */
141	PHM_PlatformCaps_RegWriteDelay,                         /* indicates if back to back reg write delay is required */
142	PHM_PlatformCaps_NonABMSupportInPPLib,                  /* ABM is not supported in PPLIB, (moved from PPLIB to DAL) */
143	PHM_PlatformCaps_GFXDynamicMGPowerGating,               /* Enable Dynamic MG PowerGating on Trinity */
144	PHM_PlatformCaps_DisableSMUUVDHandshake,                /* Disable SMU UVD Handshake */
145	PHM_PlatformCaps_DTE,                                   /* Support Digital Temperature Estimation */
146	PHM_PlatformCaps_W5100Specifc_SmuSkipMsgDTE,            /* This is for the feature requested by David B., and Tonny W.*/
147	PHM_PlatformCaps_UVDPowerGating,                        /* enable UVD power gating, supported from Llano */
148	PHM_PlatformCaps_UVDDynamicPowerGating,                 /* enable UVD Dynamic power gating, supported from UVD5 */
149	PHM_PlatformCaps_VCEPowerGating,                        /* Enable VCE power gating, supported for TN and later ASICs */
150	PHM_PlatformCaps_SamuPowerGating,                       /* Enable SAMU power gating, supported for KV and later ASICs */
151	PHM_PlatformCaps_UVDDPM,                                /* UVD clock DPM */
152	PHM_PlatformCaps_VCEDPM,                                /* VCE clock DPM */
153	PHM_PlatformCaps_SamuDPM,                               /* SAMU clock DPM */
154	PHM_PlatformCaps_AcpDPM,                                /* ACP clock DPM */
155	PHM_PlatformCaps_SclkDeepSleepAboveLow,                 /* Enable SCLK Deep Sleep on all DPM states */
156	PHM_PlatformCaps_DynamicUVDState,                       /* Dynamic UVD State */
157	PHM_PlatformCaps_WantSAMClkWithDummyBackEnd,            /* Set SAM Clk With Dummy Back End */
158	PHM_PlatformCaps_WantUVDClkWithDummyBackEnd,            /* Set UVD Clk With Dummy Back End */
159	PHM_PlatformCaps_WantVCEClkWithDummyBackEnd,            /* Set VCE Clk With Dummy Back End */
160	PHM_PlatformCaps_WantACPClkWithDummyBackEnd,            /* Set ACP Clk With Dummy Back End */
161	PHM_PlatformCaps_OD6inACSupport,                        /* indicates that the ASIC/back end supports OD6 */
162	PHM_PlatformCaps_OD6inDCSupport,                        /* indicates that the ASIC/back end supports OD6 in DC */
163	PHM_PlatformCaps_EnablePlatformPowerManagement,         /* indicates that Platform Power Management feature is supported */
164	PHM_PlatformCaps_SurpriseRemoval,                       /* indicates that surprise removal feature is requested */
165	PHM_PlatformCaps_NewCACVoltage,                         /* indicates new CAC voltage table support */
166	PHM_PlatformCaps_DBRamping,                             /* for dI/dT feature */
167	PHM_PlatformCaps_TDRamping,                             /* for dI/dT feature */
168	PHM_PlatformCaps_TCPRamping,                            /* for dI/dT feature */
169	PHM_PlatformCaps_EnableSMU7ThermalManagement,           /* SMC will manage thermal events */
170	PHM_PlatformCaps_FPS,                                   /* FPS support */
171	PHM_PlatformCaps_ACP,                                   /* ACP support */
172	PHM_PlatformCaps_SclkThrottleLowNotification,           /* SCLK Throttle Low Notification */
173	PHM_PlatformCaps_XDMAEnabled,                           /* XDMA engine is enabled */
174	PHM_PlatformCaps_UseDummyBackEnd,                       /* use dummy back end */
175	PHM_PlatformCaps_EnableDFSBypass,                       /* Enable DFS bypass */
176	PHM_PlatformCaps_VddNBDirectRequest,
177	PHM_PlatformCaps_PauseMMSessions,
178	PHM_PlatformCaps_UnTabledHardwareInterface,             /* Tableless/direct call hardware interface for CI and newer ASICs */
179	PHM_PlatformCaps_SMU7,                                  /* indicates that vpuRecoveryBegin without SMU shutdown */
180	PHM_PlatformCaps_RevertGPIO5Polarity,                   /* indicates revert GPIO5 polarity table support */
181	PHM_PlatformCaps_Thermal2GPIO17,                        /* indicates thermal2GPIO17 table support */
182	PHM_PlatformCaps_ThermalOutGPIO,                        /* indicates ThermalOutGPIO support, pin number is assigned by VBIOS */
183	PHM_PlatformCaps_DisableMclkSwitchingForFrameLock,      /* Disable memory clock switch during Framelock */
184	PHM_PlatformCaps_VRHotGPIOConfigurable,                 /* indicates VR_HOT GPIO configurable */
185	PHM_PlatformCaps_TempInversion,                         /* enable Temp Inversion feature */
186	PHM_PlatformCaps_IOIC3,
187	PHM_PlatformCaps_ConnectedStandby,
188	PHM_PlatformCaps_EVV,
189	PHM_PlatformCaps_EnableLongIdleBACOSupport,
190	PHM_PlatformCaps_CombinePCCWithThermalSignal,
191	PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc,
192	PHM_PlatformCaps_StablePState,
193	PHM_PlatformCaps_OD6PlusinACSupport,
194	PHM_PlatformCaps_OD6PlusinDCSupport,
195	PHM_PlatformCaps_ODThermalLimitUnlock,
196	PHM_PlatformCaps_ReducePowerLimit,
197	PHM_PlatformCaps_ODFuzzyFanControlSupport,
198	PHM_PlatformCaps_GeminiRegulatorFanControlSupport,
199	PHM_PlatformCaps_ControlVDDGFX,
200	PHM_PlatformCaps_BBBSupported,
201	PHM_PlatformCaps_DisableVoltageIsland,
202	PHM_PlatformCaps_FanSpeedInTableIsRPM,
203	PHM_PlatformCaps_GFXClockGatingManagedInCAIL,
204	PHM_PlatformCaps_IcelandULPSSWWorkAround,
205	PHM_PlatformCaps_FPSEnhancement,
206	PHM_PlatformCaps_LoadPostProductionFirmware,
207	PHM_PlatformCaps_VpuRecoveryInProgress,
208	PHM_PlatformCaps_Falcon_QuickTransition,
209	PHM_PlatformCaps_AVFS,
210	PHM_PlatformCaps_ClockStretcher,
211	PHM_PlatformCaps_TablelessHardwareInterface,
212	PHM_PlatformCaps_EnableDriverEVV,
	/* Must stay last: sizes the caps bitfield arrays. */
213	PHM_PlatformCaps_Max
214};
215
/* Width, in bits, of one uint32_t element of a caps bitfield array. */
216#define PHM_MAX_NUM_CAPS_BITS_PER_FIELD (sizeof(uint32_t)*8)
217
218/* Number of uint32_t entries used by CAPS table */
219#define PHM_MAX_NUM_CAPS_ULONG_ENTRIES \
220	((PHM_PlatformCaps_Max + ((PHM_MAX_NUM_CAPS_BITS_PER_FIELD) - 1)) / (PHM_MAX_NUM_CAPS_BITS_PER_FIELD))
221
/* Capability bitmap; bits are indexed by enum phm_platform_caps. */
222struct pp_hw_descriptor {
223	uint32_t hw_caps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES];
224};
225
/* Selects which mechanism designates a performance level. */
226enum PHM_PerformanceLevelDesignation {
227	PHM_PerformanceLevelDesignation_Activity,
228	PHM_PerformanceLevelDesignation_PowerContainment
229};
230
231typedef enum PHM_PerformanceLevelDesignation PHM_PerformanceLevelDesignation;
232
/* Clocks and voltages describing a single performance level. */
233struct PHM_PerformanceLevel {
234	uint32_t coreClock;
235	uint32_t memory_clock;
236	uint32_t vddc;
237	uint32_t vddci;
238	uint32_t nonLocalMemoryFreq;
239	uint32_t nonLocalMemoryWidth;
240};
241
242typedef struct PHM_PerformanceLevel PHM_PerformanceLevel;
243
244/* Function for setting a platform cap */
245static inline void phm_cap_set(uint32_t *caps,
246 enum phm_platform_caps c)
247{
248 caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] |= (1UL <<
249 (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1)));
250}
251
252static inline void phm_cap_unset(uint32_t *caps,
253 enum phm_platform_caps c)
254{
255 caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] &= ~(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1)));
256}
257
258static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps c)
259{
260 return (0 != (caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] &
261 (1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1)))));
262}
263
/* Sentinel for "no/unknown PCIe generation". */
264#define PP_PCIEGenInvalid  0xffff
265enum PP_PCIEGen {
266	PP_PCIEGen1 = 0,                /* PCIE 1.0 - Transfer rate of 2.5 GT/s */
267	PP_PCIEGen2,                    /*PCIE 2.0 - Transfer rate of 5.0 GT/s */
268	PP_PCIEGen3                     /*PCIE 3.0 - Transfer rate of 8.0 GT/s */
269};
270
271typedef enum PP_PCIEGen PP_PCIEGen;
272
/* Supported PCIe generation and lane-count bounds. */
273#define PP_Min_PCIEGen	PP_PCIEGen1
274#define PP_Max_PCIEGen	PP_PCIEGen3
275#define PP_Min_PCIELane 1
276#define PP_Max_PCIELane 32
277
/* Clock domains addressable by the hardware manager. */
278enum phm_clock_Type {
279	PHM_DispClock = 1,
280	PHM_SClock,
281	PHM_MemClock
282};
283
/* Upper bound on entries in a phm_clocks list. */
284#define MAX_NUM_CLOCKS 16
285
/* A set of engine/memory clocks (units not specified here — see callers). */
286struct PP_Clocks {
287	uint32_t engineClock;
288	uint32_t memoryClock;
289	uint32_t BusBandwidth;
290	uint32_t engineClockInSR;
291};
292
/*
 * Static, per-platform limits and tuning values: capability bits, overdrive
 * ranges, TDP/power-budget limits and Vid (voltage ID) adjustment bounds.
 */
293struct phm_platform_descriptor {
294	uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES];
295	uint32_t vbiosInterruptId;
296	struct PP_Clocks overdriveLimit;
297	struct PP_Clocks clockStep;
298	uint32_t hardwareActivityPerformanceLevels;
299	uint32_t minimumClocksReductionPercentage;
300	uint32_t minOverdriveVDDC;
301	uint32_t maxOverdriveVDDC;
302	uint32_t overdriveVDDCStep;
303	uint32_t hardwarePerformanceLevels;
304	uint16_t powerBudget;
305	uint32_t TDPLimit;
306	uint32_t nearTDPLimit;
307	uint32_t nearTDPLimitAdjusted;
308	uint32_t SQRampingThreshold;
309	uint32_t CACLeakage;
310	uint16_t TDPODLimit;
311	uint32_t TDPAdjustment;
312	bool TDPAdjustmentPolarity;
313	uint16_t LoadLineSlope;
314	uint32_t VidMinLimit;
315	uint32_t VidMaxLimit;
316	uint32_t VidStep;
317	uint32_t VidAdjustment;
318	bool VidAdjustmentPolarity;
319};
320
/* Fixed-capacity list of clock values; only the first num_of_entries are valid. */
321struct phm_clocks {
322	uint32_t num_of_entries;
323	uint32_t clock[MAX_NUM_CLOCKS];
324};
325
/*
 * Power levels reported to the display (DAL) component.  The numeric
 * aliases _0.._7 map the named levels onto DAL's index-based scheme.
 */
326enum PP_DAL_POWERLEVEL {
327	PP_DAL_POWERLEVEL_INVALID = 0,
328	PP_DAL_POWERLEVEL_ULTRALOW,
329	PP_DAL_POWERLEVEL_LOW,
330	PP_DAL_POWERLEVEL_NOMINAL,
331	PP_DAL_POWERLEVEL_PERFORMANCE,
332
333	PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW,
334	PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW,
335	PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL,
336	PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE,
337	PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1,
338	PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1,
339	PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1,
340	PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1,
341};
342
343
/*
 * Hardware-manager entry points.  All take the pp_hwmgr context and are
 * implemented elsewhere in the powerplay module; int returns are 0 on
 * success / negative error (convention presumed — confirm in the .c files).
 */
/* Power/clock gating control. */
344extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
345extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
346extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
347extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
/* ASIC setup and dynamic power-state management. */
348extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
349extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
350extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
351extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
352extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
/* Power-state transition and adjustment. */
353extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
354		    const struct pp_hw_power_state *pcurrent_state,
355		 const struct pp_hw_power_state *pnew_power_state);
356
357extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
358				   struct pp_power_state *adjusted_ps,
359			     const struct pp_power_state *current_ps);
360
361extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level);
/* Display configuration and thermal controller hooks. */
362extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
363extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
364extern int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info);
365extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range);
366extern int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr);
367extern bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr);
368
369extern int phm_check_states_equal(struct pp_hwmgr *hwmgr,
370				 const struct pp_hw_power_state *pstate1,
371				 const struct pp_hw_power_state *pstate2,
372				 bool *equal);
373
374extern int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
375		const struct amd_pp_display_configuration *display_config);
376
377extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
378		struct amd_pp_dal_clock_info*info);
379
380extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr);
381
382extern int phm_power_down_asic(struct pp_hwmgr *hwmgr);
383
384#endif /* _HARDWARE_MANAGER_H_ */
385
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
new file mode 100644
index 000000000000..aeaa3dbba525
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -0,0 +1,801 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _HWMGR_H_
24#define _HWMGR_H_
25
26#include <linux/seq_file.h>
27#include "amd_powerplay.h"
28#include "pp_instance.h"
29#include "hardwaremanager.h"
30#include "pp_power_source.h"
31#include "hwmgr_ppt.h"
32#include "ppatomctrl.h"
33#include "hwmgr_ppt.h"
34
/* Forward declarations — definitions live in other powerplay headers. */
35struct pp_instance;
36struct pp_hwmgr;
37struct pp_hw_power_state;
38struct pp_power_state;
39struct PP_VCEState;
40struct phm_fan_speed_info;
41struct pp_atomctrl_voltage_table;
42
43
/* How long to wait before a display-gap memory-clock switch. */
44enum DISPLAY_GAP {
45	DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
46	DISPLAY_GAP_VBLANK       = 1,   /* Wait for vblank. */
47	DISPLAY_GAP_WATERMARK    = 2,   /* Wait for MCHG watermark. (Note that HW may deassert WM in VBI depending on DC_STUTTER_CNTL.) */
48	DISPLAY_GAP_IGNORE       = 3    /* Do not wait. */
49};
50typedef enum DISPLAY_GAP DISPLAY_GAP;
51
52
/* One DPM level entry for VI-family ASICs. */
53struct vi_dpm_level {
54	bool enabled;
55	uint32_t value;
56	uint32_t param1;
57};
58
/*
 * Variable-length DPM table: count entries follow.  NOTE(review): uses a
 * sized-1 trailing array rather than a C99 flexible array member; changing
 * it would alter sizeof for allocators, so it is left as-is.
 */
59struct vi_dpm_table {
60	uint32_t count;
61	struct vi_dpm_level dpm_level[1];
62};
63
/* Special return codes from table-dispatch functions. */
64enum PP_Result {
65	PP_Result_TableImmediateExit = 0x13,
66};
67
/* PCIe performance-request levels. */
68#define PCIE_PERF_REQ_REMOVE_REGISTRY   0
69#define PCIE_PERF_REQ_FORCE_LOWPOWER    1
70#define PCIE_PERF_REQ_GEN1         2
71#define PCIE_PERF_REQ_GEN2         3
72#define PCIE_PERF_REQ_GEN3         4
73
/* Magic values identifying which ASIC family a backend private struct belongs to. */
74enum PHM_BackEnd_Magic {
75	PHM_Dummy_Magic       = 0xAA5555AA,
76	PHM_RV770_Magic       = 0xDCBAABCD,
77	PHM_Kong_Magic        = 0x239478DF,
78	PHM_NIslands_Magic    = 0x736C494E,
79	PHM_Sumo_Magic        = 0x8339FA11,
80	PHM_SIslands_Magic    = 0x369431AC,
81	PHM_Trinity_Magic     = 0x96751873,
82	PHM_CIslands_Magic    = 0x38AC78B0,
83	PHM_Kv_Magic          = 0xDCBBABC0,
84	PHM_VIslands_Magic    = 0x20130307,
85	PHM_Cz_Magic          = 0x67DCBA25
86};
87
88
/* Targets for PCIe power gating. */
89#define PHM_PCIE_POWERGATING_TARGET_GFX            0
90#define PHM_PCIE_POWERGATING_TARGET_DDI            1
91#define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE     2
92#define PHM_PCIE_POWERGATING_TARGET_PHY            3
93
/* Signature of one step in a phm runtime table (see struct phm_master_table_item). */
94typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input,
95				  void *output, void *storage, int result);
96
/* Predicate deciding whether a master-table entry is included at runtime. */
97typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr);
98
/* Input pair for a power-state transition. */
99struct phm_set_power_state_input {
100	const struct pp_hw_power_state *pcurrent_state;
101	const struct pp_hw_power_state *pnew_state;
102};
103
/* Requested clocks arbitrated per IP block (ACP/UVD/VCE/GFX). */
104struct phm_acp_arbiter {
105	uint32_t acpclk;
106};
107
108struct phm_uvd_arbiter {
109	uint32_t vclk;
110	uint32_t dclk;
111	uint32_t vclk_ceiling;
112	uint32_t dclk_ceiling;
113};
114
115struct phm_vce_arbiter {
116	uint32_t evclk;
117	uint32_t ecclk;
118};
119
120struct phm_gfx_arbiter {
121	uint32_t sclk;
122	uint32_t mclk;
123	uint32_t sclk_over_drive;
124	uint32_t mclk_over_drive;
125	uint32_t sclk_threshold;
126	uint32_t num_cus;
127};
128
129/* Entries in the master tables */
130struct phm_master_table_item {
131	phm_check_function isFunctionNeededInRuntimeTable;
132	phm_table_function tableFunction;
133};
134
135enum phm_master_table_flag {
136	PHM_MasterTableFlag_None         = 0,
137	PHM_MasterTableFlag_ExitOnError  = 1,
138};
139
140/* The header of the master tables */
141struct phm_master_table_header {
142	uint32_t storage_size;
143	uint32_t flags;
144	struct phm_master_table_item *master_list;
145};
146
/* Runtime table built from a master table by phm_construct_table(). */
147struct phm_runtime_table_header {
148	uint32_t storage_size;
149	bool exit_error;
150	phm_table_function *function_list;
151};
152
/*
 * Clock/voltage dependency records.  Tables below use sized-1 trailing
 * arrays with `count` valid entries (pre-C99-flexible-array idiom).
 */
153struct phm_clock_array {
154	uint32_t count;
155	uint32_t values[1];
156};
157
158struct phm_clock_voltage_dependency_record {
159	uint32_t clk;
160	uint32_t v;
161};
162
163struct phm_vceclock_voltage_dependency_record {
164	uint32_t ecclk;
165	uint32_t evclk;
166	uint32_t v;
167};
168
169struct phm_uvdclock_voltage_dependency_record {
170	uint32_t vclk;
171	uint32_t dclk;
172	uint32_t v;
173};
174
175struct phm_samuclock_voltage_dependency_record {
176	uint32_t samclk;
177	uint32_t v;
178};
179
180struct phm_acpclock_voltage_dependency_record {
181	uint32_t acpclk;
182	uint32_t v;
183};
184
185struct phm_clock_voltage_dependency_table {
186	uint32_t count;						/* Number of entries. */
187	struct phm_clock_voltage_dependency_record entries[1];	/* Dynamically allocate count entries. */
188};
189
190struct phm_phase_shedding_limits_record {
191	uint32_t  Voltage;
192	uint32_t    Sclk;
193	uint32_t    Mclk;
194};
195
196
/* Build, run, and tear down runtime tables derived from master tables. */
197extern int phm_dispatch_table(struct pp_hwmgr *hwmgr,
198			      struct phm_runtime_table_header *rt_table,
199			      void *input, void *output);
200
201extern int phm_construct_table(struct pp_hwmgr *hwmgr,
202			       struct phm_master_table_header *master_table,
203			       struct phm_runtime_table_header *rt_table);
204
205extern int phm_destroy_table(struct pp_hwmgr *hwmgr,
206			     struct phm_runtime_table_header *rt_table);
207
208
/*
 * Per-IP clock/voltage dependency tables.  As above, each holds `count`
 * valid entries in a sized-1 trailing array allocated to the real size.
 */
209struct phm_uvd_clock_voltage_dependency_record {
210	uint32_t vclk;
211	uint32_t dclk;
212	uint32_t v;
213};
214
215struct phm_uvd_clock_voltage_dependency_table {
216	uint8_t count;
217	struct phm_uvd_clock_voltage_dependency_record entries[1];
218};
219
220struct phm_acp_clock_voltage_dependency_record {
221	uint32_t acpclk;
222	uint32_t v;
223};
224
225struct phm_acp_clock_voltage_dependency_table {
226	uint32_t count;
227	struct phm_acp_clock_voltage_dependency_record entries[1];
228};
229
230struct phm_vce_clock_voltage_dependency_record {
231	uint32_t ecclk;
232	uint32_t evclk;
233	uint32_t v;
234};
235
236struct phm_phase_shedding_limits_table {
237	uint32_t                           count;
238	struct phm_phase_shedding_limits_record  entries[1];
239};
240
241struct phm_vceclock_voltage_dependency_table {
242	uint8_t count;						/* Number of entries. */
243	struct phm_vceclock_voltage_dependency_record entries[1];	/* Dynamically allocate count entries. */
244};
245
246struct phm_uvdclock_voltage_dependency_table {
247	uint8_t count;						/* Number of entries. */
248	struct phm_uvdclock_voltage_dependency_record entries[1];	/* Dynamically allocate count entries. */
249};
250
251struct phm_samuclock_voltage_dependency_table {
252	uint8_t count;						/* Number of entries. */
253	struct phm_samuclock_voltage_dependency_record entries[1];	/* Dynamically allocate count entries. */
254};
255
256struct phm_acpclock_voltage_dependency_table {
257	uint32_t count;						/* Number of entries. */
258	struct phm_acpclock_voltage_dependency_record entries[1];	/* Dynamically allocate count entries. */
259};
260
261struct phm_vce_clock_voltage_dependency_table {
262	uint8_t count;
263	struct phm_vce_clock_voltage_dependency_record entries[1];
264};
265
/*
 * Per-ASIC hardware-manager vtable. Each generation's backend fills in the
 * callbacks it supports; entries left NULL must be checked by callers.
 * Fix in this revision: two copy-pasted parameter names corrected
 * (set_max_fan_rpm_output took 'us_max_fan_pwm', set_fan_speed_rpm took
 * 'percent'). Parameter names in function-pointer types are documentation
 * only, so this is ABI- and source-compatible.
 */
266struct pp_hwmgr_func {
267 int (*backend_init)(struct pp_hwmgr *hw_mgr);
268 int (*backend_fini)(struct pp_hwmgr *hw_mgr);
269 int (*asic_setup)(struct pp_hwmgr *hw_mgr);
270 int (*get_power_state_size)(struct pp_hwmgr *hw_mgr);
271
272 int (*apply_state_adjust_rules)(struct pp_hwmgr *hwmgr,
273 struct pp_power_state *prequest_ps,
274 const struct pp_power_state *pcurrent_ps);
275
276 int (*force_dpm_level)(struct pp_hwmgr *hw_mgr,
277 enum amd_dpm_forced_level level);
278
279 int (*dynamic_state_management_enable)(
280 struct pp_hwmgr *hw_mgr);
281
282 int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
283 struct pp_hw_power_state *hw_ps);
284
285 int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr,
286 unsigned long, struct pp_power_state *);
287 int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
288 int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
289 int (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
290 int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
291 int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
292 int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
293 int (*power_state_set)(struct pp_hwmgr *hwmgr,
294 const void *state);
 /* NOTE: 'perforce' is a historical typo for 'performance'; the name is
  * referenced by callers and must not be changed here. */
295 void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
296 struct seq_file *m);
297 int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
298 int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
299 int (*display_config_changed)(struct pp_hwmgr *hwmgr);
300 int (*disable_clock_power_gating)(struct pp_hwmgr *hwmgr);
301 int (*update_clock_gatings)(struct pp_hwmgr *hwmgr,
302 const uint32_t *msg_id);
 /* Fan control; RPM variant takes an RPM value, PWM variant a PWM value. */
303 int (*set_max_fan_rpm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm);
304 int (*set_max_fan_pwm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm);
305 int (*get_temperature)(struct pp_hwmgr *hwmgr);
306 int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr);
307 int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
308 int (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
309 int (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
310 int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent);
311 int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed);
312 int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t rpm);
313 int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
314 int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr);
315 int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr);
316 int (*register_internal_thermal_interrupt)(struct pp_hwmgr *hwmgr,
317 const void *thermal_interrupt_info);
318 bool (*check_smc_update_required_for_display_configuration)(struct pp_hwmgr *hwmgr);
319 int (*check_states_equal)(struct pp_hwmgr *hwmgr,
320 const struct pp_hw_power_state *pstate1,
321 const struct pp_hw_power_state *pstate2,
322 bool *equal);
323 int (*set_cpu_power_state)(struct pp_hwmgr *hwmgr);
324 int (*store_cc6_data)(struct pp_hwmgr *hwmgr, uint32_t separation_time,
325 bool cc6_disable, bool pstate_disable,
326 bool pstate_switch_disable);
327 int (*get_dal_power_level)(struct pp_hwmgr *hwmgr,
328 struct amd_pp_dal_clock_info *info);
329 int (*power_off_asic)(struct pp_hwmgr *hwmgr);
330};
331
/* PPTable (BIOS power-play table) parser vtable: lifecycle plus VCE state lookup. */
332struct pp_table_func {
333 int (*pptable_init)(struct pp_hwmgr *hw_mgr);
334 int (*pptable_fini)(struct pp_hwmgr *hw_mgr);
335 int (*pptable_get_number_of_vce_state_table_entries)(struct pp_hwmgr *hw_mgr);
 /* Fetch VCE state entry 'i'; also returns an opaque clock_info pointer and a flag. */
336 int (*pptable_get_vce_state_table_entry)(
337 struct pp_hwmgr *hwmgr,
338 unsigned long i,
339 struct PP_VCEState *vce_state,
340 void **clock_info,
341 unsigned long *flag);
342};
343
/*
 * One CAC leakage entry. The two anonymous structs overlay the same storage:
 * either a (Vddc, Leakage) pair — reused on CI as StdVoltageHiSidd/LoSidd —
 * or three discrete Vddc levels. Which view is valid depends on the ASIC.
 */
344union phm_cac_leakage_record {
345 struct {
346 uint16_t Vddc; /* in CI, we use it for StdVoltageHiSidd */
347 uint32_t Leakage; /* in CI, we use it for StdVoltageLoSidd */
348 };
349 struct {
350 uint16_t Vddc1;
351 uint16_t Vddc2;
352 uint16_t Vddc3;
353 };
354};
355
/* Table of CAC leakage records; allocated with 'count' entries. */
356struct phm_cac_leakage_table {
357 uint32_t count;
358 union phm_cac_leakage_record entries[1];
359};
360
/* SAMU clock/voltage operating point. */
361struct phm_samu_clock_voltage_dependency_record {
362 uint32_t samclk;
363 uint32_t v; /* voltage required at this SAMU clock */
364};
365
366
/* Table of SAMU operating points; allocated with 'count' entries. */
367struct phm_samu_clock_voltage_dependency_table {
368 uint8_t count;
369 struct phm_samu_clock_voltage_dependency_record entries[1];
370};
371
/*
 * CAC/TDP power and thermal limits parsed from the PPTable.
 * Field prefixes follow the BIOS table convention: us* = uint16_t, uc* = uint8_t.
 * Units are defined by the PPTable specification — not visible here; confirm
 * against the parser before interpreting raw values.
 */
372struct phm_cac_tdp_table {
373 uint16_t usTDP;
374 uint16_t usConfigurableTDP;
375 uint16_t usTDC;
376 uint16_t usBatteryPowerLimit;
377 uint16_t usSmallPowerLimit;
378 uint16_t usLowCACLeakage;
379 uint16_t usHighCACLeakage;
380 uint16_t usMaximumPowerDeliveryLimit;
 /* Operating-temperature window and fan/thermal targets. */
381 uint16_t usOperatingTempMinLimit;
382 uint16_t usOperatingTempMaxLimit;
383 uint16_t usOperatingTempStep;
384 uint16_t usOperatingTempHyst;
385 uint16_t usDefaultTargetOperatingTemp;
386 uint16_t usTargetOperatingTemp;
387 uint16_t usPowerTuneDataSetID;
388 uint16_t usSoftwareShutdownTemp;
389 uint16_t usClockStretchAmount;
 /* Per-sensor temperature limits. */
390 uint16_t usTemperatureLimitHotspot;
391 uint16_t usTemperatureLimitLiquid1;
392 uint16_t usTemperatureLimitLiquid2;
393 uint16_t usTemperatureLimitVrVddc;
394 uint16_t usTemperatureLimitVrMvdd;
395 uint16_t usTemperatureLimitPlx;
 /* I2C addressing for the external liquid/VR/PLX temperature sensors. */
396 uint8_t ucLiquid1_I2C_address;
397 uint8_t ucLiquid2_I2C_address;
398 uint8_t ucLiquid_I2C_Line;
399 uint8_t ucVr_I2C_address;
400 uint8_t ucVr_I2C_Line;
401 uint8_t ucPlx_I2C_address;
402 uint8_t ucPlx_I2C_Line;
403};
404
/* Platform Power Management (PPM) parameters: TDP/TDC budgets for APU vs dGPU. */
405struct phm_ppm_table {
406 uint8_t ppm_design;
407 uint16_t cpu_core_number;
408 uint32_t platform_tdp;
409 uint32_t small_ac_platform_tdp;
410 uint32_t platform_tdc;
411 uint32_t small_ac_platform_tdc;
412 uint32_t apu_tdp;
413 uint32_t dgpu_tdp;
414 uint32_t dgpu_ulv_power;
415 uint32_t tj_max; /* maximum junction temperature */
416};
417
/* One VQ (video quality) budgeting entry; ul* = uint32_t, uc* = uint8_t (BIOS naming). */
418struct phm_vq_budgeting_record {
419 uint32_t ulCUs; /* compute-unit count this entry applies to */
420 uint32_t ulSustainableSOCPowerLimitLow;
421 uint32_t ulSustainableSOCPowerLimitHigh;
422 uint32_t ulMinSclkLow;
423 uint32_t ulMinSclkHigh;
424 uint8_t ucDispConfig;
425 uint32_t ulDClk;
426 uint32_t ulEClk;
427 uint32_t ulSustainableSclk;
428 uint32_t ulSustainableCUs;
429};
430
/* Table of VQ budgeting records; allocated with 'numEntries' entries. */
431struct phm_vq_budgeting_table {
432 uint8_t numEntries;
433 struct phm_vq_budgeting_record entries[1];
434};
435
/* Maximum clocks and voltages permitted for one power source (AC or DC). */
436struct phm_clock_and_voltage_limits {
437 uint32_t sclk;
438 uint32_t mclk;
439 uint16_t vddc;
440 uint16_t vddci;
441 uint16_t vddgfx;
442};
443
444/* Parsed contents of a version-1 PPTable: dependency tables, limits and lookup tables. */
445
446struct phm_ppt_v1_information {
447 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk;
448 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk;
449 struct phm_clock_array *valid_sclk_values;
450 struct phm_clock_array *valid_mclk_values;
 /* Separate clock/voltage ceilings for DC (battery) and AC operation. */
451 struct phm_clock_and_voltage_limits max_clock_voltage_on_dc;
452 struct phm_clock_and_voltage_limits max_clock_voltage_on_ac;
453 struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl;
454 struct phm_ppm_table *ppm_parameter_table;
455 struct phm_cac_tdp_table *cac_dtp_table;
456 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_dep_table;
457 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
458 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table;
459 struct phm_ppt_v1_pcie_table *pcie_table;
460 uint16_t us_ulv_voltage_offset; /* voltage offset applied in ULV state */
461};
462
/*
 * Aggregated dynamic-state tables for legacy (pre-v1 PPTable) parsing.
 * All pointed-to tables are owned by the hwmgr backend; lifetime matches
 * the hwmgr instance (freed in backend_fini — confirm per backend).
 */
463struct phm_dynamic_state_info {
464 struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk;
465 struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk;
466 struct phm_clock_voltage_dependency_table *vddc_dependency_on_mclk;
467 struct phm_clock_voltage_dependency_table *mvdd_dependency_on_mclk;
468 struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl;
469 struct phm_clock_array *valid_sclk_values;
470 struct phm_clock_array *valid_mclk_values;
471 struct phm_clock_and_voltage_limits max_clock_voltage_on_dc;
472 struct phm_clock_and_voltage_limits max_clock_voltage_on_ac;
473 uint32_t mclk_sclk_ratio;
474 uint32_t sclk_mclk_delta;
475 uint32_t vddc_vddci_delta;
476 uint32_t min_vddc_for_pcie_gen2;
477 struct phm_cac_leakage_table *cac_leakage_table;
478 struct phm_phase_shedding_limits_table *vddc_phase_shed_limits_table;
479
 /* Per-engine clock/voltage dependency tables (VCE, UVD, ACP, SAMU). */
480 struct phm_vce_clock_voltage_dependency_table
481 *vce_clock_voltage_dependency_table;
482 struct phm_uvd_clock_voltage_dependency_table
483 *uvd_clock_voltage_dependency_table;
484 struct phm_acp_clock_voltage_dependency_table
485 *acp_clock_voltage_dependency_table;
486 struct phm_samu_clock_voltage_dependency_table
487 *samu_clock_voltage_dependency_table;
488
489 struct phm_ppm_table *ppm_parameter_table;
490 struct phm_cac_tdp_table *cac_dtp_table;
491 struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk;
492 struct phm_vq_budgeting_table *vq_budgeting_table;
493};
494
/* Static fan characteristics reported by the thermal controller. */
495struct pp_fan_info {
496 bool bNoFan; /* true when the board has no fan (e.g. passive/liquid) */
497 uint8_t ucTachometerPulsesPerRevolution;
498 uint32_t ulMinRPM;
499 uint32_t ulMaxRPM;
500};
501
/* Tunables for the advanced (fuzzy / table-driven) fan controller. */
502struct pp_advance_fan_control_parameters {
503 uint16_t usTMin; /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */
504 uint16_t usTMed; /* The middle temperature where we change slopes. */
505 uint16_t usTHigh; /* The high temperature for setting the second slope. */
506 uint16_t usPWMMin; /* The minimum PWM value in percent (0.01% increments). */
507 uint16_t usPWMMed; /* The PWM value (in percent) at TMed. */
508 uint16_t usPWMHigh; /* The PWM value at THigh. */
509 uint8_t ucTHyst; /* Temperature hysteresis. Integer. */
510 uint32_t ulCycleDelay; /* The time between two invocations of the fan control routine in microseconds. */
511 uint16_t usTMax; /* The max temperature */
512 uint8_t ucFanControlMode;
513 uint16_t usFanPWMMinLimit;
514 uint16_t usFanPWMMaxLimit;
515 uint16_t usFanPWMStep;
516 uint16_t usDefaultMaxFanPWM;
517 uint16_t usFanOutputSensitivity;
518 uint16_t usDefaultFanOutputSensitivity;
519 uint16_t usMaxFanPWM; /* The max Fan PWM value for Fuzzy Fan Control feature */
520 uint16_t usFanRPMMinLimit; /* Minimum limit range in percentage, need to calculate based on minRPM/MaxRpm */
521 uint16_t usFanRPMMaxLimit; /* Maximum limit range in percentage, usually set to 100% by default */
522 uint16_t usFanRPMStep; /* Step increments/decrements, in percent */
523 uint16_t usDefaultMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, default from PPTable */
524 uint16_t usMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, user defined */
525 uint16_t usFanCurrentLow; /* Low current */
526 uint16_t usFanCurrentHigh; /* High current */
527 uint16_t usFanRPMLow; /* Low RPM */
528 uint16_t usFanRPMHigh; /* High RPM */
529 uint32_t ulMinFanSCLKAcousticLimit; /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
530 uint8_t ucTargetTemperature; /* Advanced fan controller target temperature. */
531 uint8_t ucMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. */
532 uint16_t usFanGainEdge; /* The following is added for Fiji */
533 uint16_t usFanGainHotspot;
534 uint16_t usFanGainLiquid;
535 uint16_t usFanGainVrVddc;
536 uint16_t usFanGainVrMvdd;
537 uint16_t usFanGainPlx;
538 uint16_t usFanGainHbm;
539};
540
/* Thermal controller description: type, I2C location, fan data and tunables. */
541struct pp_thermal_controller_info {
542 uint8_t ucType;
543 uint8_t ucI2cLine;
544 uint8_t ucI2cAddress;
545 struct pp_fan_info fanInfo;
546 struct pp_advance_fan_control_parameters advanceFanControlParameters;
547};
548
/* Firmware version numbers of the on-chip controllers (SMC, DMCU, MC, NB). */
549struct phm_microcode_version_info {
550 uint32_t SMC;
551 uint32_t DMCU;
552 uint32_t MC;
553 uint32_t NB;
554};
555
556/**
557 * The main hardware manager structure. One instance per device; created by
 * hwmgr_init() and torn down by hwmgr_fini(). Holds the ASIC identity, the
 * per-generation vtables, the parsed power-play tables and all power states.
558 */
559struct pp_hwmgr {
 /* ASIC identification used to select the backend implementation. */
560 uint32_t chip_family;
561 uint32_t chip_id;
562 uint32_t hw_revision;
563 uint32_t sub_sys_id;
564 uint32_t sub_vendor_id;
565
566 void *device; /* opaque CGS device handle */
567 struct pp_smumgr *smumgr;
568 const void *soft_pp_table;
569 bool need_pp_table_upload;
570 enum amd_dpm_forced_level dpm_level;
571 bool block_hw_access;
572 struct phm_gfx_arbiter gfx_arbiter;
573 struct phm_acp_arbiter acp_arbiter;
574 struct phm_uvd_arbiter uvd_arbiter;
575 struct phm_vce_arbiter vce_arbiter;
576 uint32_t usec_timeout; /* timeout for register-wait helpers, in microseconds */
577 void *pptable;
578 struct phm_platform_descriptor platform_descriptor;
579 void *backend; /* per-generation private data, owned by backend_init/fini */
580 enum PP_DAL_POWERLEVEL dal_power_level;
581 struct phm_dynamic_state_info dyn_state;
 /* Runtime tables built via phm_construct_table() and run via phm_dispatch_table(). */
582 struct phm_runtime_table_header setup_asic;
583 struct phm_runtime_table_header power_down_asic;
584 struct phm_runtime_table_header disable_dynamic_state_management;
585 struct phm_runtime_table_header enable_dynamic_state_management;
586 struct phm_runtime_table_header set_power_state;
587 struct phm_runtime_table_header enable_clock_power_gatings;
588 struct phm_runtime_table_header display_configuration_changed;
589 struct phm_runtime_table_header start_thermal_controller;
590 struct phm_runtime_table_header set_temperature_range;
591 const struct pp_hwmgr_func *hwmgr_func;
592 const struct pp_table_func *pptable_func;
 /* Power-state array ('num_ps' entries of 'ps_size' bytes) and notable states. */
593 struct pp_power_state *ps;
594 enum pp_power_source power_source;
595 uint32_t num_ps;
596 struct pp_thermal_controller_info thermal_controller;
597 bool fan_ctrl_is_in_default_mode;
598 uint32_t fan_ctrl_default_mode;
599 uint32_t tmin;
600 struct phm_microcode_version_info microcode_version_info;
601 uint32_t ps_size;
602 struct pp_power_state *current_ps;
603 struct pp_power_state *request_ps;
604 struct pp_power_state *boot_ps;
605 struct pp_power_state *uvd_ps;
606 struct amd_pp_display_configuration display_config;
607};
608
609
/* Hardware-manager lifecycle. */
610extern int hwmgr_init(struct amd_pp_init *pp_init,
611 struct pp_instance *handle);
612
613extern int hwmgr_fini(struct pp_hwmgr *hwmgr);
614
615extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr);
616
/* Register polling helpers; all bound by hwmgr->usec_timeout. */
617extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
618 uint32_t value, uint32_t mask);
619
620extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
621 uint32_t index, uint32_t value, uint32_t mask);
622
623extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr,
624 uint32_t indirect_port, uint32_t index);
625
626extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr,
627 uint32_t indirect_port,
628 uint32_t index,
629 uint32_t value);
630
631extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
632 uint32_t indirect_port,
633 uint32_t index,
634 uint32_t value,
635 uint32_t mask);
636
637extern void phm_wait_for_indirect_register_unequal(
638 struct pp_hwmgr *hwmgr,
639 uint32_t indirect_port,
640 uint32_t index,
641 uint32_t value,
642 uint32_t mask);
643
/* Platform capability queries (power gating, microcode fan control). */
644extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
645extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
646extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
647
/* Voltage-table construction and DPM-table helpers. */
648extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
649extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
650extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
651extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
652extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
653extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
654extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
655extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
656extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
657extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
658extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
659extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
660 uint16_t virtual_voltage_id, int32_t *sclk);
/* NOTE: 'initializa' is a historical typo for 'initialize'; the symbol name is
 * referenced by the implementation and callers, so it is kept as-is. */
661extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
662extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
663extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
664
665
/* Whole-register mask for the wait helpers below. */
666#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
667
/* Token-paste the auto-generated <REG>__<FIELD>__SHIFT / _MASK constants. */
668#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
669#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
670
/* Return 'origval' with the named field replaced by 'fieldval'. */
671#define PHM_SET_FIELD(origval, reg, field, fieldval) \
672 (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
673 (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
674
/* Extract the named field from a register value. */
675#define PHM_GET_FIELD(value, reg, field) \
676 (((value) & PHM_FIELD_MASK(reg, field)) >> \
677 PHM_FIELD_SHIFT(reg, field))
678
679
/*
 * Polling wrappers taking a raw register index. The indirect variants
 * token-paste the port name into the mm<PORT>_INDEX (or _INDEX_0 for the
 * VF/PF copy) data-port register.
 */
680#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \
681 phm_wait_on_register(hwmgr, index, value, mask)
682
683#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \
684 phm_wait_for_register_unequal(hwmgr, index, value, mask)
685
686#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
687 phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
688
689#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
690 phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask)
691
692#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
693 phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask)
694
695#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
696 phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask)
697
698/* Operations on named registers: 'reg' is pasted onto the auto-generated
 * mm<REG> (direct) or ix<REG> (indirect) offset constants. */
699
700#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \
701 PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
702
703#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
704 PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
705
706#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
707 PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
708
709#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
710 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
711
712#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
713 PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
714
715#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
716 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
717
718/* Operations on named fields. The WRITE_* macros perform a non-atomic
 * read-modify-write of the containing register; callers must serialize
 * concurrent access themselves. */
719
720#define PHM_READ_FIELD(device, reg, field) \
721 PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
722
723#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
724 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
725 reg, field)
726
727#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
728 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
729 reg, field)
730
731#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
732 cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
733 cgs_read_register(device, mm##reg), reg, field, fieldval))
734
735#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
736 cgs_write_ind_register(device, port, ix##reg, \
737 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
738 reg, field, fieldval))
739
740#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
741 cgs_write_ind_register(device, port, ix##reg, \
742 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
743 reg, field, fieldval))
744
/* Field-level wait wrappers: shift the expected value into field position. */
745#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \
746 PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \
747 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
748
749#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
750 PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
751 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
752
753#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
754 PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
755 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
756
757#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
758 PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \
759 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
760
761#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
762 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \
763 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
764
765#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
766 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \
767 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
768
769/* Operations on arrays of registers & fields: 'offset' is added to the base
 * register index mm<REG>, i.e. it is a register-index offset, not bytes. */
770
771#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \
772 cgs_read_register(device, mm##reg + (offset))
773
774#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \
775 cgs_write_register(device, mm##reg + (offset), value)
776
777#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \
778 PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
779
780#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \
781 PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
782
783#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \
784 PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field)
785
/* Non-atomic read-modify-write of one field in an array register. */
786#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \
787 PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \
788 PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \
789 reg, field, fieldvalue))
790
791#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \
792 PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \
793 (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \
794 PHM_FIELD_MASK(reg, field))
795
796#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \
797 PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \
798 (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \
799 PHM_FIELD_MASK(reg, field))
800
801#endif /* _HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
new file mode 100644
index 000000000000..a3f0ce4d5835
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -0,0 +1,200 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef PP_POWERSTATE_H
24#define PP_POWERSTATE_H
25
/* Header of an ASIC-specific hardware power state; backends embed this and
 * append their own data (size reported via get_power_state_size). */
26struct pp_hw_power_state {
27 unsigned int magic; /* backend identifier used to validate the state type */
28};
29
30struct pp_power_state;
31
32
/* Reserved id meaning "no power state". */
33#define PP_INVALID_POWER_STATE_ID (0)
34
35
36/*
37 * An item of a list containing Power States.
38 */
39
/* Intrusive doubly-linked list node embedded in each pp_power_state. */
40struct PP_StateLinkedList {
41 struct pp_power_state *next;
42 struct pp_power_state *prev;
43};
44
45
/* User-visible label of a power state, ordered roughly from lowest to highest power. */
46enum PP_StateUILabel {
47 PP_StateUILabel_None,
48 PP_StateUILabel_Battery,
49 PP_StateUILabel_MiddleLow,
50 PP_StateUILabel_Balanced,
51 PP_StateUILabel_MiddleHigh,
52 PP_StateUILabel_Performance,
53 PP_StateUILabel_BACO /* Bus Active, Chip Off */
54};
55
/* Bit flags classifying a power state; combine into PP_StateClassificationFlags. */
56enum PP_StateClassificationFlag {
57 PP_StateClassificationFlag_Boot = 0x0001,
58 PP_StateClassificationFlag_Thermal = 0x0002,
59 PP_StateClassificationFlag_LimitedPowerSource = 0x0004,
60 PP_StateClassificationFlag_Rest = 0x0008,
61 PP_StateClassificationFlag_Forced = 0x0010,
62 PP_StateClassificationFlag_User3DPerformance = 0x0020,
63 PP_StateClassificationFlag_User2DPerformance = 0x0040,
64 PP_StateClassificationFlag_3DPerformance = 0x0080,
65 PP_StateClassificationFlag_ACOverdriveTemplate = 0x0100,
66 PP_StateClassificationFlag_Uvd = 0x0200,
67 PP_StateClassificationFlag_3DPerformanceLow = 0x0400,
68 PP_StateClassificationFlag_ACPI = 0x0800,
69 PP_StateClassificationFlag_HD2 = 0x1000,
70 PP_StateClassificationFlag_UvdHD = 0x2000,
71 PP_StateClassificationFlag_UvdSD = 0x4000,
72 PP_StateClassificationFlag_UserDCPerformance = 0x8000,
73 PP_StateClassificationFlag_DCOverdriveTemplate = 0x10000,
74 PP_StateClassificationFlag_BACO = 0x20000,
75 PP_StateClassificationFlag_LimitedPowerSource_2 = 0x40000,
76 PP_StateClassificationFlag_ULV = 0x80000,
77 PP_StateClassificationFlag_UvdMVC = 0x100000,
78};
79
/* Holds an OR of PP_StateClassificationFlag bits. */
80typedef unsigned int PP_StateClassificationFlags;
81
/* Classification metadata attached to a power state. */
82struct PP_StateClassificationBlock {
83 enum PP_StateUILabel ui_label;
 /* NOTE(review): typed as a single flag but holds an OR of flags —
  * PP_StateClassificationFlags would describe the intent better. */
84 enum PP_StateClassificationFlag flags;
85 int bios_index; /* index of the source entry in the BIOS state table */
86 bool temporary_state;
87 bool to_be_deleted;
88};
89
/* PCIe settings of a power state. */
90struct PP_StatePcieBlock {
91 unsigned int lanes;
92};
93
/* Where the refresh-rate limit comes from: the display's EDID or an explicit value. */
94enum PP_RefreshrateSource {
95 PP_RefreshrateSource_EDID,
96 PP_RefreshrateSource_Explicit
97};
98
/* Display-related settings of a power state. */
99struct PP_StateDisplayBlock {
100 bool disableFrameModulation;
101 bool limitRefreshrate;
102 enum PP_RefreshrateSource refreshrateSource;
103 int explicitRefreshrate; /* used when refreshrateSource is _Explicit */
104 int edidRefreshrateIndex; /* used when refreshrateSource is _EDID */
105 bool enableVariBright;
106};
107
/* Memory settings of a power state.
 * NOTE: 'Memroy' is a historical typo for 'Memory'; the tag is referenced
 * elsewhere (pp_power_state.memory) and must not be renamed in isolation. */
108struct PP_StateMemroyBlock {
109 bool dllOff;
110 uint8_t m3arb;
111 uint8_t unused[3]; /* explicit padding */
112};
113
/* Software-algorithm toggles of a power state. */
114struct PP_StateSoftwareAlgorithmBlock {
115 bool disableLoadBalancing;
116 bool enableSleepForTimestamps;
117};
118
/* Temperature values are stored in millidegrees Celsius. */
119#define PP_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
120
121/**
122 * Type to hold a temperature range.
123 */
124struct PP_TemperatureRange {
125 uint32_t min; /* in PP_TEMPERATURE_UNITS_PER_CENTIGRADES units */
126 uint32_t max; /* in PP_TEMPERATURE_UNITS_PER_CENTIGRADES units */
127};
128
/* Restrictions on when a power state may be selected. */
129struct PP_StateValidationBlock {
130 bool singleDisplayOnly;
131 bool disallowOnDC; /* state not usable on battery (DC) power */
132 uint8_t supportedPowerLevels;
133};
134
/* UVD clock pair requested by a power state. */
135struct PP_UVD_CLOCKS {
136 uint32_t VCLK;
137 uint32_t DCLK;
138};
139
140/**
141* Structure to hold a PowerPlay Power State: common classification/validation
* blocks followed by the ASIC-specific hardware state.
142*/
143struct pp_power_state {
144 uint32_t id; /* PP_INVALID_POWER_STATE_ID when unset */
145 struct PP_StateLinkedList orderedList;
146 struct PP_StateLinkedList allStatesList;
147
148 struct PP_StateClassificationBlock classification;
149 struct PP_StateValidationBlock validation;
150 struct PP_StatePcieBlock pcie;
151 struct PP_StateDisplayBlock display;
152 struct PP_StateMemroyBlock memory;
153 struct PP_TemperatureRange temperatures;
154 struct PP_StateSoftwareAlgorithmBlock software;
155 struct PP_UVD_CLOCKS uvd_clocks;
 /* NOTE(review): appears to be a variable-length, ASIC-specific area
  * (states are allocated with hwmgr->ps_size) — must stay the last member;
  * confirm against the allocation code. */
156 struct pp_hw_power_state hardware;
157};
158
159
160/* Structure to hold a VCE state entry: the clocks used while encoding. */
161struct PP_VCEState {
162 uint32_t evclk;
163 uint32_t ecclk;
164 uint32_t sclk;
165 uint32_t mclk;
166};
167
/* Multimedia profiling state: not available, running, or stopped. */
168enum PP_MMProfilingState {
169 PP_MMProfilingState_NA = 0,
170 PP_MMProfilingState_Started,
171 PP_MMProfilingState_Stopped
172};
173
/* Clock request issued by a client engine; *HardMin fields are hard floors,
 * *_ceiling fields are upper bounds, others are requested operating clocks. */
174struct PP_Clock_Engine_Request {
175 unsigned long clientType;
176 unsigned long ctxid;
177 uint64_t context_handle;
178 unsigned long sclk;
179 unsigned long sclkHardMin;
180 unsigned long mclk;
181 unsigned long iclk;
182 unsigned long evclk;
183 unsigned long ecclk;
184 unsigned long ecclkHardMin;
185 unsigned long vclk;
186 unsigned long dclk;
187 unsigned long samclk;
188 unsigned long acpclk;
189 unsigned long sclkOverdrive;
190 unsigned long mclkOverdrive;
191 unsigned long sclk_threshold;
192 unsigned long flag;
193 unsigned long vclk_ceiling;
194 unsigned long dclk_ceiling;
195 unsigned long num_cus;
196 unsigned long pmflag;
197 enum PP_MMProfilingState MMProfilingState;
198};
199
200#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
new file mode 100644
index 000000000000..3bd5e69b9045
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef PP_ACPI_H
#define PP_ACPI_H

/* Query whether the platform's ATCS ACPI method supports function 'index'. */
24extern bool acpi_atcs_functions_supported(void *device,
25 uint32_t index);
/* Request a PCIe performance level via ACPI; 'advertise' selects whether the
 * request is advertised to the platform. Returns 0 on success. */
26extern int acpi_pcie_perf_request(void *device,
27 uint8_t perf_req,
28 bool advertise);

#endif /* PP_ACPI_H — guard added: header previously had none */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h b/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h
new file mode 100644
index 000000000000..0c1593e53654
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef PP_ASICBLOCKS_H
24#define PP_ASICBLOCKS_H
25
26
/* ASIC blocks that can be individually clock/power gated. */
27enum PHM_AsicBlock {
28 PHM_AsicBlock_GFX,
29 PHM_AsicBlock_UVD_MVC,
30 PHM_AsicBlock_UVD,
31 PHM_AsicBlock_UVD_HD,
32 PHM_AsicBlock_UVD_SD,
33 PHM_AsicBlock_Count /* number of blocks; keep last */
34};
35
/* Clock-gating policy for a block: always on, always off, or hardware-managed. */
36enum PHM_ClockGateSetting {
37 PHM_ClockGateSetting_StaticOn,
38 PHM_ClockGateSetting_StaticOff,
39 PHM_ClockGateSetting_Dynamic
40};
41
/* Bitfield of ASIC blocks present/enabled. */
42struct phm_asic_blocks {
43 bool gfx : 1;
44 bool uvd : 1;
45};
46
47#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
new file mode 100644
index 000000000000..d7d83b7c7f95
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -0,0 +1,47 @@
1
2/*
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24#ifndef PP_DEBUG_H
25#define PP_DEBUG_H
26
27#include <linux/types.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30
31#define PP_ASSERT_WITH_CODE(cond, msg, code) \
32 do { \
33 if (!(cond)) { \
34 printk("%s\n", msg); \
35 code; \
36 } \
37 } while (0)
38
39
40#define PP_DBG_LOG(fmt, ...) \
41 do { \
42 if(0)printk(KERN_INFO "[ pp_dbg ] " fmt, ##__VA_ARGS__); \
43 } while (0)
44
45
46#endif /* PP_DEBUG_H */
47
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h b/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h
new file mode 100644
index 000000000000..0faf6a25c18b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _PP_FEATURE_H_
25#define _PP_FEATURE_H_
26
27/**
28 * PowerPlay feature ids.
29 */
30enum pp_feature {
31 PP_Feature_PowerPlay = 0,
32 PP_Feature_User2DPerformance,
33 PP_Feature_User3DPerformance,
34 PP_Feature_VariBright,
35 PP_Feature_VariBrightOnPowerXpress,
36 PP_Feature_ReducedRefreshRate,
37 PP_Feature_GFXClockGating,
38 PP_Feature_OverdriveTest,
39 PP_Feature_OverDrive,
40 PP_Feature_PowerBudgetWaiver,
41 PP_Feature_PowerControl,
42 PP_Feature_PowerControl_2,
43 PP_Feature_MultiUVDState,
44 PP_Feature_Force3DClock,
45 PP_Feature_BACO,
46 PP_Feature_PowerDown,
47 PP_Feature_DynamicUVDState,
48 PP_Feature_VCEDPM,
49 PP_Feature_PPM,
50 PP_Feature_ACP_POWERGATING,
51 PP_Feature_FFC,
52 PP_Feature_FPS,
53 PP_Feature_ViPG,
54 PP_Feature_Max
55};
56
57/**
58 * Struct for PowerPlay feature info.
59 */
60struct pp_feature_info {
61 bool supported; /* feature supported by PowerPlay */
62 bool enabled; /* feature enabled in PowerPlay */
63 bool enabled_default; /* default enable status of the feature */
64 uint32_t version; /* feature version */
65};
66
67#endif /* _PP_FEATURE_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
new file mode 100644
index 000000000000..4d8ed1f33de4
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _PP_INSTANCE_H_
24#define _PP_INSTANCE_H_
25
26#include "smumgr.h"
27#include "hwmgr.h"
28#include "eventmgr.h"
29
30#define PP_VALID 0x1F1F1F1F
31
32struct pp_instance {
33 uint32_t pp_valid;
34 struct pp_smumgr *smu_mgr;
35 struct pp_hwmgr *hwmgr;
36 struct pp_eventmgr *eventmgr;
37};
38
39#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
new file mode 100644
index 000000000000..b43315cc5d58
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef PP_POWERSOURCE_H
25#define PP_POWERSOURCE_H
26
27enum pp_power_source {
28 PP_PowerSource_AC = 0,
29 PP_PowerSource_DC,
30 PP_PowerSource_LimitedPower,
31 PP_PowerSource_LimitedPower_2,
32 PP_PowerSource_Max
33};
34
35
36#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h b/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h
new file mode 100644
index 000000000000..c067e0925b6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _PP_INTERRUPT_H_
25#define _PP_INTERRUPT_H_
26
27enum amd_thermal_irq {
28 AMD_THERMAL_IRQ_LOW_TO_HIGH = 0,
29 AMD_THERMAL_IRQ_HIGH_TO_LOW,
30
31 AMD_THERMAL_IRQ_LAST
32};
33
34/* The type of the interrupt callback functions in PowerPlay */
35typedef int (*irq_handler_func_t)(void *private_data,
36 unsigned src_id, const uint32_t *iv_entry);
37
38/* Event Manager action chain list information */
39struct pp_interrupt_registration_info {
40 irq_handler_func_t call_back; /* Pointer to callback function */
41 void *context; /* Pointer to callback function context */
42 uint32_t src_id; /* Registered interrupt id */
43 const uint32_t *iv_entry;
44};
45
46#endif /* _PP_INTERRUPT_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/smu7.h b/drivers/gpu/drm/amd/powerplay/inc/smu7.h
index 75a380a15292..75a380a15292 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu7.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
new file mode 100644
index 000000000000..b73d6b59ac32
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
@@ -0,0 +1,664 @@
1#ifndef SMU72_H
2#define SMU72_H
3
4#if !defined(SMC_MICROCODE)
5#pragma pack(push, 1)
6#endif
7
8#define SMU__NUM_SCLK_DPM_STATE 8
9#define SMU__NUM_MCLK_DPM_LEVELS 4
10#define SMU__NUM_LCLK_DPM_LEVELS 8
11#define SMU__NUM_PCIE_DPM_LEVELS 8
12
13enum SID_OPTION {
14 SID_OPTION_HI,
15 SID_OPTION_LO,
16 SID_OPTION_COUNT
17};
18
19enum Poly3rdOrderCoeff {
20 LEAKAGE_TEMPERATURE_SCALAR,
21 LEAKAGE_VOLTAGE_SCALAR,
22 DYNAMIC_VOLTAGE_SCALAR,
23 POLY_3RD_ORDER_COUNT
24};
25
26struct SMU7_Poly3rdOrder_Data {
27 int32_t a;
28 int32_t b;
29 int32_t c;
30 int32_t d;
31 uint8_t a_shift;
32 uint8_t b_shift;
33 uint8_t c_shift;
34 uint8_t x_shift;
35};
36
37typedef struct SMU7_Poly3rdOrder_Data SMU7_Poly3rdOrder_Data;
38
39struct Power_Calculator_Data {
40 uint16_t NoLoadVoltage;
41 uint16_t LoadVoltage;
42 uint16_t Resistance;
43 uint16_t Temperature;
44 uint16_t BaseLeakage;
45 uint16_t LkgTempScalar;
46 uint16_t LkgVoltScalar;
47 uint16_t LkgAreaScalar;
48 uint16_t LkgPower;
49 uint16_t DynVoltScalar;
50 uint32_t Cac;
51 uint32_t DynPower;
52 uint32_t TotalCurrent;
53 uint32_t TotalPower;
54};
55
56typedef struct Power_Calculator_Data PowerCalculatorData_t;
57
58struct Gc_Cac_Weight_Data {
59 uint8_t index;
60 uint32_t value;
61};
62
63typedef struct Gc_Cac_Weight_Data GcCacWeight_Data;
64
65
66typedef struct {
67 uint32_t high;
68 uint32_t low;
69} data_64_t;
70
71typedef struct {
72 data_64_t high;
73 data_64_t low;
74} data_128_t;
75
76#define SMU7_CONTEXT_ID_SMC 1
77#define SMU7_CONTEXT_ID_VBIOS 2
78
79#define SMU72_MAX_LEVELS_VDDC 16
80#define SMU72_MAX_LEVELS_VDDGFX 16
81#define SMU72_MAX_LEVELS_VDDCI 8
82#define SMU72_MAX_LEVELS_MVDD 4
83
84#define SMU_MAX_SMIO_LEVELS 4
85
86#define SMU72_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE /* SCLK + SQ DPM + ULV */
87#define SMU72_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS /* MCLK Levels DPM */
88#define SMU72_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS /* LCLK Levels */
89#define SMU72_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS /* PCIe speed and number of lanes. */
90#define SMU72_MAX_LEVELS_UVD 8 /* VCLK/DCLK levels for UVD. */
91#define SMU72_MAX_LEVELS_VCE 8 /* ECLK levels for VCE. */
92#define SMU72_MAX_LEVELS_ACP 8 /* ACLK levels for ACP. */
93#define SMU72_MAX_LEVELS_SAMU 8 /* SAMCLK levels for SAMU. */
94#define SMU72_MAX_ENTRIES_SMIO 32 /* Number of entries in SMIO table. */
95
96#define DPM_NO_LIMIT 0
97#define DPM_NO_UP 1
98#define DPM_GO_DOWN 2
99#define DPM_GO_UP 3
100
101#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
102#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
103
104#define GPIO_CLAMP_MODE_VRHOT 1
105#define GPIO_CLAMP_MODE_THERM 2
106#define GPIO_CLAMP_MODE_DC 4
107
108#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
109#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
110#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
111#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
112#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
113#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
114#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
115#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
116#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
117#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
118#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
119#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
120#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
121#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
122#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
123#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
124#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
125#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
126#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
127#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
128
129/* Virtualization Defines */
130#define CG_XDMA_MASK 0x1
131#define CG_XDMA_SHIFT 0
132#define CG_UVD_MASK 0x2
133#define CG_UVD_SHIFT 1
134#define CG_VCE_MASK 0x4
135#define CG_VCE_SHIFT 2
136#define CG_SAMU_MASK 0x8
137#define CG_SAMU_SHIFT 3
138#define CG_GFX_MASK 0x10
139#define CG_GFX_SHIFT 4
140#define CG_SDMA_MASK 0x20
141#define CG_SDMA_SHIFT 5
142#define CG_HDP_MASK 0x40
143#define CG_HDP_SHIFT 6
144#define CG_MC_MASK 0x80
145#define CG_MC_SHIFT 7
146#define CG_DRM_MASK 0x100
147#define CG_DRM_SHIFT 8
148#define CG_ROM_MASK 0x200
149#define CG_ROM_SHIFT 9
150#define CG_BIF_MASK 0x400
151#define CG_BIF_SHIFT 10
152
153#define SMU72_DTE_ITERATIONS 5
154#define SMU72_DTE_SOURCES 3
155#define SMU72_DTE_SINKS 1
156#define SMU72_NUM_CPU_TES 0
157#define SMU72_NUM_GPU_TES 1
158#define SMU72_NUM_NON_TES 2
159#define SMU72_DTE_FAN_SCALAR_MIN 0x100
160#define SMU72_DTE_FAN_SCALAR_MAX 0x166
161#define SMU72_DTE_FAN_TEMP_MAX 93
162#define SMU72_DTE_FAN_TEMP_MIN 83
163
164#if defined SMU__FUSION_ONLY
165#define SMU7_DTE_ITERATIONS 5
166#define SMU7_DTE_SOURCES 5
167#define SMU7_DTE_SINKS 3
168#define SMU7_NUM_CPU_TES 2
169#define SMU7_NUM_GPU_TES 1
170#define SMU7_NUM_NON_TES 2
171#endif
172
173struct SMU7_HystController_Data {
174 uint8_t waterfall_up;
175 uint8_t waterfall_down;
176 uint8_t waterfall_limit;
177 uint8_t spare;
178 uint16_t release_cnt;
179 uint16_t release_limit;
180};
181
182typedef struct SMU7_HystController_Data SMU7_HystController_Data;
183
184struct SMU72_PIDController {
185 uint32_t Ki;
186 int32_t LFWindupUpperLim;
187 int32_t LFWindupLowerLim;
188 uint32_t StatePrecision;
189 uint32_t LfPrecision;
190 uint32_t LfOffset;
191 uint32_t MaxState;
192 uint32_t MaxLfFraction;
193 uint32_t StateShift;
194};
195
196typedef struct SMU72_PIDController SMU72_PIDController;
197
198struct SMU7_LocalDpmScoreboard {
199 uint32_t PercentageBusy;
200
201 int32_t PIDError;
202 int32_t PIDIntegral;
203 int32_t PIDOutput;
204
205 uint32_t SigmaDeltaAccum;
206 uint32_t SigmaDeltaOutput;
207 uint32_t SigmaDeltaLevel;
208
209 uint32_t UtilizationSetpoint;
210
211 uint8_t TdpClampMode;
212 uint8_t TdcClampMode;
213 uint8_t ThermClampMode;
214 uint8_t VoltageBusy;
215
216 int8_t CurrLevel;
217 int8_t TargLevel;
218 uint8_t LevelChangeInProgress;
219 uint8_t UpHyst;
220
221 uint8_t DownHyst;
222 uint8_t VoltageDownHyst;
223 uint8_t DpmEnable;
224 uint8_t DpmRunning;
225
226 uint8_t DpmForce;
227 uint8_t DpmForceLevel;
228 uint8_t DisplayWatermark;
229 uint8_t McArbIndex;
230
231 uint32_t MinimumPerfSclk;
232
233 uint8_t AcpiReq;
234 uint8_t AcpiAck;
235 uint8_t GfxClkSlow;
236 uint8_t GpioClampMode; /* bit0 = VRHOT: bit1 = THERM: bit2 = DC */
237
238 uint8_t FpsFilterWeight;
239 uint8_t EnabledLevelsChange;
240 uint8_t DteClampMode;
241 uint8_t FpsClampMode;
242
243 uint16_t LevelResidencyCounters[SMU72_MAX_LEVELS_GRAPHICS];
244 uint16_t LevelSwitchCounters[SMU72_MAX_LEVELS_GRAPHICS];
245
246 void (*TargetStateCalculator)(uint8_t);
247 void (*SavedTargetStateCalculator)(uint8_t);
248
249 uint16_t AutoDpmInterval;
250 uint16_t AutoDpmRange;
251
252 uint8_t FpsEnabled;
253 uint8_t MaxPerfLevel;
254 uint8_t AllowLowClkInterruptToHost;
255 uint8_t FpsRunning;
256
257 uint32_t MaxAllowedFrequency;
258
259 uint32_t FilteredSclkFrequency;
260 uint32_t LastSclkFrequency;
261 uint32_t FilteredSclkFrequencyCnt;
262};
263
264typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
265
266#define SMU7_MAX_VOLTAGE_CLIENTS 12
267
268typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
269
270struct SMU_VoltageLevel {
271 uint8_t Vddc;
272 uint8_t Vddci;
273 uint8_t VddGfx;
274 uint8_t Phases;
275};
276
277typedef struct SMU_VoltageLevel SMU_VoltageLevel;
278
279struct SMU7_VoltageScoreboard {
280 SMU_VoltageLevel CurrentVoltage;
281 SMU_VoltageLevel TargetVoltage;
282 uint16_t MaxVid;
283 uint8_t HighestVidOffset;
284 uint8_t CurrentVidOffset;
285
286 uint8_t ControllerBusy;
287 uint8_t CurrentVid;
288 uint8_t CurrentVddciVid;
289 uint8_t VddGfxShutdown; /* 0 = normal mode, 1 = shut down */
290
291 SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
292 uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
293
294 uint8_t TargetIndex;
295 uint8_t Delay;
296 uint8_t ControllerEnable;
297 uint8_t ControllerRunning;
298 uint16_t CurrentStdVoltageHiSidd;
299 uint16_t CurrentStdVoltageLoSidd;
300 uint8_t OverrideVoltage;
301 uint8_t VddcUseUlvOffset;
302 uint8_t VddGfxUseUlvOffset;
303 uint8_t padding;
304
305 VoltageChangeHandler_t ChangeVddc;
306 VoltageChangeHandler_t ChangeVddGfx;
307 VoltageChangeHandler_t ChangeVddci;
308 VoltageChangeHandler_t ChangePhase;
309 VoltageChangeHandler_t ChangeMvdd;
310
311 VoltageChangeHandler_t functionLinks[6];
312
313 uint8_t *VddcFollower1;
314 uint8_t *VddcFollower2;
315 int16_t Driver_OD_RequestedVidOffset1;
316 int16_t Driver_OD_RequestedVidOffset2;
317
318};
319
320typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
321
322#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
323
324struct SMU7_PCIeLinkSpeedScoreboard {
325 uint8_t DpmEnable;
326 uint8_t DpmRunning;
327 uint8_t DpmForce;
328 uint8_t DpmForceLevel;
329
330 uint8_t CurrentLinkSpeed;
331 uint8_t EnabledLevelsChange;
332 uint16_t AutoDpmInterval;
333
334 uint16_t AutoDpmRange;
335 uint16_t AutoDpmCount;
336
337 uint8_t DpmMode;
338 uint8_t AcpiReq;
339 uint8_t AcpiAck;
340 uint8_t CurrentLinkLevel;
341
342};
343
344typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
345
346/* -------------------------------------------------------- CAC table ------------------------------------------------------ */
347#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
348#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
349#define SMU7_SCALE_I 7
350#define SMU7_SCALE_R 12
351
352struct SMU7_PowerScoreboard {
353 PowerCalculatorData_t VddGfxPowerData[SID_OPTION_COUNT];
354 PowerCalculatorData_t VddcPowerData[SID_OPTION_COUNT];
355
356 uint32_t TotalGpuPower;
357 uint32_t TdcCurrent;
358
359 uint16_t VddciTotalPower;
360 uint16_t sparesasfsdfd;
361 uint16_t Vddr1Power;
362 uint16_t RocPower;
363
364 uint16_t CalcMeasPowerBlend;
365 uint8_t SidOptionPower;
366 uint8_t SidOptionCurrent;
367
368 uint32_t WinTime;
369
370 uint16_t Telemetry_1_slope;
371 uint16_t Telemetry_2_slope;
372 int32_t Telemetry_1_offset;
373 int32_t Telemetry_2_offset;
374
375 uint32_t VddcCurrentTelemetry;
376 uint32_t VddGfxCurrentTelemetry;
377 uint32_t VddcPowerTelemetry;
378 uint32_t VddGfxPowerTelemetry;
379 uint32_t VddciPowerTelemetry;
380
381 uint32_t VddcPower;
382 uint32_t VddGfxPower;
383 uint32_t VddciPower;
384
385 uint32_t TelemetryCurrent[2];
386 uint32_t TelemetryVoltage[2];
387 uint32_t TelemetryPower[2];
388};
389
390typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
391
392struct SMU7_ThermalScoreboard {
393 int16_t GpuLimit;
394 int16_t GpuHyst;
395 uint16_t CurrGnbTemp;
396 uint16_t FilteredGnbTemp;
397
398 uint8_t ControllerEnable;
399 uint8_t ControllerRunning;
400 uint8_t AutoTmonCalInterval;
401 uint8_t AutoTmonCalEnable;
402
403 uint8_t ThermalDpmEnabled;
404 uint8_t SclkEnabledMask;
405 uint8_t spare[2];
406 int32_t temperature_gradient;
407
408 SMU7_HystController_Data HystControllerData;
409 int32_t WeightedSensorTemperature;
410 uint16_t TemperatureLimit[SMU72_MAX_LEVELS_GRAPHICS];
411 uint32_t Alpha;
412};
413
414typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
415
416/* For FeatureEnables: */
417#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
418#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
419#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
420#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
421#define SMU7_UVD_DPM_CONFIG_MASK 0x10
422#define SMU7_VCE_DPM_CONFIG_MASK 0x20
423#define SMU7_ACP_DPM_CONFIG_MASK 0x40
424#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
425#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
426
427#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
428#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
429#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
430#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
431#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
432#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
433
434/* All 'soft registers' should be uint32_t. */
435struct SMU72_SoftRegisters {
436 uint32_t RefClockFrequency;
437 uint32_t PmTimerPeriod;
438 uint32_t FeatureEnables;
439
440 uint32_t PreVBlankGap;
441 uint32_t VBlankTimeout;
442 uint32_t TrainTimeGap;
443
444 uint32_t MvddSwitchTime;
445 uint32_t LongestAcpiTrainTime;
446 uint32_t AcpiDelay;
447 uint32_t G5TrainTime;
448 uint32_t DelayMpllPwron;
449 uint32_t VoltageChangeTimeout;
450
451 uint32_t HandshakeDisables;
452
453 uint8_t DisplayPhy1Config;
454 uint8_t DisplayPhy2Config;
455 uint8_t DisplayPhy3Config;
456 uint8_t DisplayPhy4Config;
457
458 uint8_t DisplayPhy5Config;
459 uint8_t DisplayPhy6Config;
460 uint8_t DisplayPhy7Config;
461 uint8_t DisplayPhy8Config;
462
463 uint32_t AverageGraphicsActivity;
464 uint32_t AverageMemoryActivity;
465 uint32_t AverageGioActivity;
466
467 uint8_t SClkDpmEnabledLevels;
468 uint8_t MClkDpmEnabledLevels;
469 uint8_t LClkDpmEnabledLevels;
470 uint8_t PCIeDpmEnabledLevels;
471
472 uint8_t UVDDpmEnabledLevels;
473 uint8_t SAMUDpmEnabledLevels;
474 uint8_t ACPDpmEnabledLevels;
475 uint8_t VCEDpmEnabledLevels;
476
477 uint32_t DRAM_LOG_ADDR_H;
478 uint32_t DRAM_LOG_ADDR_L;
479 uint32_t DRAM_LOG_PHY_ADDR_H;
480 uint32_t DRAM_LOG_PHY_ADDR_L;
481 uint32_t DRAM_LOG_BUFF_SIZE;
482 uint32_t UlvEnterCount;
483 uint32_t UlvTime;
484 uint32_t UcodeLoadStatus;
485 uint32_t Reserved[2];
486
487};
488
489typedef struct SMU72_SoftRegisters SMU72_SoftRegisters;
490
491struct SMU72_Firmware_Header {
492 uint32_t Digest[5];
493 uint32_t Version;
494 uint32_t HeaderSize;
495 uint32_t Flags;
496 uint32_t EntryPoint;
497 uint32_t CodeSize;
498 uint32_t ImageSize;
499
500 uint32_t Rtos;
501 uint32_t SoftRegisters;
502 uint32_t DpmTable;
503 uint32_t FanTable;
504 uint32_t CacConfigTable;
505 uint32_t CacStatusTable;
506 uint32_t mcRegisterTable;
507 uint32_t mcArbDramTimingTable;
508 uint32_t PmFuseTable;
509 uint32_t Globals;
510 uint32_t ClockStretcherTable;
511 uint32_t Reserved[41];
512 uint32_t Signature;
513};
514
515typedef struct SMU72_Firmware_Header SMU72_Firmware_Header;
516
517#define SMU72_FIRMWARE_HEADER_LOCATION 0x20000
518
519enum DisplayConfig {
520 PowerDown = 1,
521 DP54x4,
522 DP54x2,
523 DP54x1,
524 DP27x4,
525 DP27x2,
526 DP27x1,
527 HDMI297,
528 HDMI162,
529 LVDS,
530 DP324x4,
531 DP324x2,
532 DP324x1
533};
534
535#define MC_BLOCK_COUNT 1
536#define CPL_BLOCK_COUNT 5
537#define SE_BLOCK_COUNT 15
538#define GC_BLOCK_COUNT 24
539
540struct SMU7_Local_Cac {
541 uint8_t BlockId;
542 uint8_t SignalId;
543 uint8_t Threshold;
544 uint8_t Padding;
545};
546
547typedef struct SMU7_Local_Cac SMU7_Local_Cac;
548
549struct SMU7_Local_Cac_Table {
550 SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
551 SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
552 SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
553 SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
554};
555
556typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
557
558#if !defined(SMC_MICROCODE)
559#pragma pack(pop)
560#endif
561
562/* Description of Clock Gating bitmask for Tonga: */
563/* System Clock Gating */
564#define CG_SYS_BITMASK_FIRST_BIT 0 /* First bit of Sys CG bitmask */
565#define CG_SYS_BITMASK_LAST_BIT 9 /* Last bit of Sys CG bitmask */
566#define CG_SYS_BIF_MGLS_SHIFT 0
567#define CG_SYS_ROM_SHIFT 1
568#define CG_SYS_MC_MGCG_SHIFT 2
569#define CG_SYS_MC_MGLS_SHIFT 3
570#define CG_SYS_SDMA_MGCG_SHIFT 4
571#define CG_SYS_SDMA_MGLS_SHIFT 5
572#define CG_SYS_DRM_MGCG_SHIFT 6
573#define CG_SYS_HDP_MGCG_SHIFT 7
574#define CG_SYS_HDP_MGLS_SHIFT 8
575#define CG_SYS_DRM_MGLS_SHIFT 9
576
577#define CG_SYS_BIF_MGLS_MASK 0x1
578#define CG_SYS_ROM_MASK 0x2
579#define CG_SYS_MC_MGCG_MASK 0x4
580#define CG_SYS_MC_MGLS_MASK 0x8
581#define CG_SYS_SDMA_MGCG_MASK 0x10
582#define CG_SYS_SDMA_MGLS_MASK 0x20
583#define CG_SYS_DRM_MGCG_MASK 0x40
584#define CG_SYS_HDP_MGCG_MASK 0x80
585#define CG_SYS_HDP_MGLS_MASK 0x100
586#define CG_SYS_DRM_MGLS_MASK 0x200
587
588/* Graphics Clock Gating */
589#define CG_GFX_BITMASK_FIRST_BIT 16 /* First bit of Gfx CG bitmask */
590#define CG_GFX_BITMASK_LAST_BIT 20 /* Last bit of Gfx CG bitmask */
591#define CG_GFX_CGCG_SHIFT 16
592#define CG_GFX_CGLS_SHIFT 17
593#define CG_CPF_MGCG_SHIFT 18
594#define CG_RLC_MGCG_SHIFT 19
595#define CG_GFX_OTHERS_MGCG_SHIFT 20
596
597#define CG_GFX_CGCG_MASK 0x00010000
598#define CG_GFX_CGLS_MASK 0x00020000
599#define CG_CPF_MGCG_MASK 0x00040000
600#define CG_RLC_MGCG_MASK 0x00080000
601#define CG_GFX_OTHERS_MGCG_MASK 0x00100000
602
603/* Voltage Regulator Configuration */
604/* VR Config info is contained in dpmTable.VRConfig */
605
606#define VRCONF_VDDC_MASK 0x000000FF
607#define VRCONF_VDDC_SHIFT 0
608#define VRCONF_VDDGFX_MASK 0x0000FF00
609#define VRCONF_VDDGFX_SHIFT 8
610#define VRCONF_VDDCI_MASK 0x00FF0000
611#define VRCONF_VDDCI_SHIFT 16
612#define VRCONF_MVDD_MASK 0xFF000000
613#define VRCONF_MVDD_SHIFT 24
614
615#define VR_MERGED_WITH_VDDC 0
616#define VR_SVI2_PLANE_1 1
617#define VR_SVI2_PLANE_2 2
618#define VR_SMIO_PATTERN_1 3
619#define VR_SMIO_PATTERN_2 4
620#define VR_STATIC_VOLTAGE 5
621
622/* Clock Stretcher Configuration */
623
624#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
625#define CKS_LOOKUPTable_MAX_ENTRIES 0x4
626
627/* The 'settings' field is subdivided in the following way: */
628#define CLOCK_STRETCHER_SETTING_DDT_MASK 0x01
629#define CLOCK_STRETCHER_SETTING_DDT_SHIFT 0x0
630#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK 0x1E
631#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
632#define CLOCK_STRETCHER_SETTING_ENABLE_MASK 0x80
633#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT 0x7
634
635struct SMU_ClockStretcherDataTableEntry {
636 uint8_t minVID;
637 uint8_t maxVID;
638
639 uint16_t setting;
640};
641typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
642
643struct SMU_ClockStretcherDataTable {
644 SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
645};
646typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;
647
648struct SMU_CKS_LOOKUPTableEntry {
649 uint16_t minFreq;
650 uint16_t maxFreq;
651
652 uint8_t setting;
653 uint8_t padding[3];
654};
655typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;
656
657struct SMU_CKS_LOOKUPTable {
658 SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
659};
660typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;
661
662#endif
663
664
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
new file mode 100644
index 000000000000..98f76e925e65
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
@@ -0,0 +1,760 @@
1#ifndef SMU72_DISCRETE_H
2#define SMU72_DISCRETE_H
3
4#include "smu72.h"
5
6#if !defined(SMC_MICROCODE)
7#pragma pack(push, 1)
8#endif
9
10struct SMIO_Pattern {
11 uint16_t Voltage;
12 uint8_t Smio;
13 uint8_t padding;
14};
15
16typedef struct SMIO_Pattern SMIO_Pattern;
17
18struct SMIO_Table {
19 SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
20};
21
22typedef struct SMIO_Table SMIO_Table;
23
24struct SMU72_Discrete_GraphicsLevel {
25 SMU_VoltageLevel MinVoltage;
26
27 uint32_t SclkFrequency;
28
29 uint8_t pcieDpmLevel;
30 uint8_t DeepSleepDivId;
31 uint16_t ActivityLevel;
32
33 uint32_t CgSpllFuncCntl3;
34 uint32_t CgSpllFuncCntl4;
35 uint32_t SpllSpreadSpectrum;
36 uint32_t SpllSpreadSpectrum2;
37 uint32_t CcPwrDynRm;
38 uint32_t CcPwrDynRm1;
39 uint8_t SclkDid;
40 uint8_t DisplayWatermark;
41 uint8_t EnabledForActivity;
42 uint8_t EnabledForThrottle;
43 uint8_t UpHyst;
44 uint8_t DownHyst;
45 uint8_t VoltageDownHyst;
46 uint8_t PowerThrottle;
47};
48
49typedef struct SMU72_Discrete_GraphicsLevel SMU72_Discrete_GraphicsLevel;
50
51struct SMU72_Discrete_ACPILevel {
52 uint32_t Flags;
53 SMU_VoltageLevel MinVoltage;
54 uint32_t SclkFrequency;
55 uint8_t SclkDid;
56 uint8_t DisplayWatermark;
57 uint8_t DeepSleepDivId;
58 uint8_t padding;
59 uint32_t CgSpllFuncCntl;
60 uint32_t CgSpllFuncCntl2;
61 uint32_t CgSpllFuncCntl3;
62 uint32_t CgSpllFuncCntl4;
63 uint32_t SpllSpreadSpectrum;
64 uint32_t SpllSpreadSpectrum2;
65 uint32_t CcPwrDynRm;
66 uint32_t CcPwrDynRm1;
67};
68
69typedef struct SMU72_Discrete_ACPILevel SMU72_Discrete_ACPILevel;
70
71struct SMU72_Discrete_Ulv {
72 uint32_t CcPwrDynRm;
73 uint32_t CcPwrDynRm1;
74 uint16_t VddcOffset;
75 uint8_t VddcOffsetVid;
76 uint8_t VddcPhase;
77 uint32_t Reserved;
78};
79
80typedef struct SMU72_Discrete_Ulv SMU72_Discrete_Ulv;
81
82struct SMU72_Discrete_MemoryLevel {
83 SMU_VoltageLevel MinVoltage;
84 uint32_t MinMvdd;
85
86 uint32_t MclkFrequency;
87
88 uint8_t EdcReadEnable;
89 uint8_t EdcWriteEnable;
90 uint8_t RttEnable;
91 uint8_t StutterEnable;
92
93 uint8_t StrobeEnable;
94 uint8_t StrobeRatio;
95 uint8_t EnabledForThrottle;
96 uint8_t EnabledForActivity;
97
98 uint8_t UpHyst;
99 uint8_t DownHyst;
100 uint8_t VoltageDownHyst;
101 uint8_t padding;
102
103 uint16_t ActivityLevel;
104 uint8_t DisplayWatermark;
105 uint8_t padding1;
106
107 uint32_t MpllFuncCntl;
108 uint32_t MpllFuncCntl_1;
109 uint32_t MpllFuncCntl_2;
110 uint32_t MpllAdFuncCntl;
111 uint32_t MpllDqFuncCntl;
112 uint32_t MclkPwrmgtCntl;
113 uint32_t DllCntl;
114 uint32_t MpllSs1;
115 uint32_t MpllSs2;
116};
117
118typedef struct SMU72_Discrete_MemoryLevel SMU72_Discrete_MemoryLevel;
119
120struct SMU72_Discrete_LinkLevel {
121 uint8_t PcieGenSpeed; /*< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 */
122 uint8_t PcieLaneCount; /*< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 */
123 uint8_t EnabledForActivity;
124 uint8_t SPC;
125 uint32_t DownThreshold;
126 uint32_t UpThreshold;
127 uint32_t Reserved;
128};
129
130typedef struct SMU72_Discrete_LinkLevel SMU72_Discrete_LinkLevel;
131
132/* MC ARB DRAM Timing registers. */
133struct SMU72_Discrete_MCArbDramTimingTableEntry {
134 uint32_t McArbDramTiming;
135 uint32_t McArbDramTiming2;
136 uint8_t McArbBurstTime;
137 uint8_t padding[3];
138};
139
140typedef struct SMU72_Discrete_MCArbDramTimingTableEntry SMU72_Discrete_MCArbDramTimingTableEntry;
141
142struct SMU72_Discrete_MCArbDramTimingTable {
143 SMU72_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
144};
145
146typedef struct SMU72_Discrete_MCArbDramTimingTable SMU72_Discrete_MCArbDramTimingTable;
147
148/* UVD VCLK/DCLK state (level) definition. */
149struct SMU72_Discrete_UvdLevel {
150 uint32_t VclkFrequency;
151 uint32_t DclkFrequency;
152 SMU_VoltageLevel MinVoltage;
153 uint8_t VclkDivider;
154 uint8_t DclkDivider;
155 uint8_t padding[2];
156};
157
158typedef struct SMU72_Discrete_UvdLevel SMU72_Discrete_UvdLevel;
159
160/* Clocks for other external blocks (VCE, ACP, SAMU). */
161struct SMU72_Discrete_ExtClkLevel {
162 uint32_t Frequency;
163 SMU_VoltageLevel MinVoltage;
164 uint8_t Divider;
165 uint8_t padding[3];
166};
167
168typedef struct SMU72_Discrete_ExtClkLevel SMU72_Discrete_ExtClkLevel;
169
170struct SMU72_Discrete_StateInfo {
171 uint32_t SclkFrequency;
172 uint32_t MclkFrequency;
173 uint32_t VclkFrequency;
174 uint32_t DclkFrequency;
175 uint32_t SamclkFrequency;
176 uint32_t AclkFrequency;
177 uint32_t EclkFrequency;
178 uint16_t MvddVoltage;
179 uint16_t padding16;
180 uint8_t DisplayWatermark;
181 uint8_t McArbIndex;
182 uint8_t McRegIndex;
183 uint8_t SeqIndex;
184 uint8_t SclkDid;
185 int8_t SclkIndex;
186 int8_t MclkIndex;
187 uint8_t PCIeGen;
188
189};
190
191typedef struct SMU72_Discrete_StateInfo SMU72_Discrete_StateInfo;
192
193struct SMU72_Discrete_DpmTable {
194 /* Multi-DPM controller settings */
195 SMU72_PIDController GraphicsPIDController;
196 SMU72_PIDController MemoryPIDController;
197 SMU72_PIDController LinkPIDController;
198
199 uint32_t SystemFlags;
200
201 /* SMIO masks for voltage and phase controls */
202 uint32_t VRConfig;
203 uint32_t SmioMask1;
204 uint32_t SmioMask2;
205 SMIO_Table SmioTable1;
206 SMIO_Table SmioTable2;
207
208 uint32_t VddcLevelCount;
209 uint32_t VddciLevelCount;
210 uint32_t VddGfxLevelCount;
211 uint32_t MvddLevelCount;
212
213 uint16_t VddcTable[SMU72_MAX_LEVELS_VDDC];
214 uint16_t VddGfxTable[SMU72_MAX_LEVELS_VDDGFX];
215 uint16_t VddciTable[SMU72_MAX_LEVELS_VDDCI];
216
217 uint8_t BapmVddGfxVidHiSidd[SMU72_MAX_LEVELS_VDDGFX];
218 uint8_t BapmVddGfxVidLoSidd[SMU72_MAX_LEVELS_VDDGFX];
219 uint8_t BapmVddGfxVidHiSidd2[SMU72_MAX_LEVELS_VDDGFX];
220
221 uint8_t BapmVddcVidHiSidd[SMU72_MAX_LEVELS_VDDC];
222 uint8_t BapmVddcVidLoSidd[SMU72_MAX_LEVELS_VDDC];
223 uint8_t BapmVddcVidHiSidd2[SMU72_MAX_LEVELS_VDDC];
224
225 uint8_t GraphicsDpmLevelCount;
226 uint8_t MemoryDpmLevelCount;
227 uint8_t LinkLevelCount;
228 uint8_t MasterDeepSleepControl;
229
230 uint8_t UvdLevelCount;
231 uint8_t VceLevelCount;
232 uint8_t AcpLevelCount;
233 uint8_t SamuLevelCount;
234
235 uint8_t ThermOutGpio;
236 uint8_t ThermOutPolarity;
237 uint8_t ThermOutMode;
238 uint8_t DPMFreezeAndForced;
239 uint32_t Reserved[4];
240
241 /* State table entries for each DPM state */
242 SMU72_Discrete_GraphicsLevel GraphicsLevel[SMU72_MAX_LEVELS_GRAPHICS];
243 SMU72_Discrete_MemoryLevel MemoryACPILevel;
244 SMU72_Discrete_MemoryLevel MemoryLevel[SMU72_MAX_LEVELS_MEMORY];
245 SMU72_Discrete_LinkLevel LinkLevel[SMU72_MAX_LEVELS_LINK];
246 SMU72_Discrete_ACPILevel ACPILevel;
247 SMU72_Discrete_UvdLevel UvdLevel[SMU72_MAX_LEVELS_UVD];
248 SMU72_Discrete_ExtClkLevel VceLevel[SMU72_MAX_LEVELS_VCE];
249 SMU72_Discrete_ExtClkLevel AcpLevel[SMU72_MAX_LEVELS_ACP];
250 SMU72_Discrete_ExtClkLevel SamuLevel[SMU72_MAX_LEVELS_SAMU];
251 SMU72_Discrete_Ulv Ulv;
252
253 uint32_t SclkStepSize;
254 uint32_t Smio[SMU72_MAX_ENTRIES_SMIO];
255
256 uint8_t UvdBootLevel;
257 uint8_t VceBootLevel;
258 uint8_t AcpBootLevel;
259 uint8_t SamuBootLevel;
260
261 uint8_t GraphicsBootLevel;
262 uint8_t GraphicsVoltageChangeEnable;
263 uint8_t GraphicsThermThrottleEnable;
264 uint8_t GraphicsInterval;
265
266 uint8_t VoltageInterval;
267 uint8_t ThermalInterval;
268 uint16_t TemperatureLimitHigh;
269
270 uint16_t TemperatureLimitLow;
271 uint8_t MemoryBootLevel;
272 uint8_t MemoryVoltageChangeEnable;
273
274 uint16_t BootMVdd;
275 uint8_t MemoryInterval;
276 uint8_t MemoryThermThrottleEnable;
277
278 uint16_t VoltageResponseTime;
279 uint16_t PhaseResponseTime;
280
281 uint8_t PCIeBootLinkLevel;
282 uint8_t PCIeGenInterval;
283 uint8_t DTEInterval;
284 uint8_t DTEMode;
285
286 uint8_t SVI2Enable;
287 uint8_t VRHotGpio;
288 uint8_t AcDcGpio;
289 uint8_t ThermGpio;
290
291 uint16_t PPM_PkgPwrLimit;
292 uint16_t PPM_TemperatureLimit;
293
294 uint16_t DefaultTdp;
295 uint16_t TargetTdp;
296
297 uint16_t FpsHighThreshold;
298 uint16_t FpsLowThreshold;
299
300 uint16_t BAPMTI_R[SMU72_DTE_ITERATIONS][SMU72_DTE_SOURCES][SMU72_DTE_SINKS];
301 uint16_t BAPMTI_RC[SMU72_DTE_ITERATIONS][SMU72_DTE_SOURCES][SMU72_DTE_SINKS];
302
303 uint8_t DTEAmbientTempBase;
304 uint8_t DTETjOffset;
305 uint8_t GpuTjMax;
306 uint8_t GpuTjHyst;
307
308 SMU_VoltageLevel BootVoltage;
309
310 uint32_t BAPM_TEMP_GRADIENT;
311
312 uint32_t LowSclkInterruptThreshold;
313 uint32_t VddGfxReChkWait;
314
315 uint8_t ClockStretcherAmount;
316
317 uint8_t Sclk_CKS_masterEn0_7;
318 uint8_t Sclk_CKS_masterEn8_15;
319 uint8_t padding[1];
320
321 uint8_t Sclk_voltageOffset[8];
322
323 SMU_ClockStretcherDataTable ClockStretcherDataTable;
324 SMU_CKS_LOOKUPTable CKS_LOOKUPTable;
325};
326
327typedef struct SMU72_Discrete_DpmTable SMU72_Discrete_DpmTable;
328
329/* --------------------------------------------------- AC Timing Parameters ------------------------------------------------ */
330#define SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
331#define SMU72_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU72_MAX_LEVELS_MEMORY /* DPM */
332
333struct SMU72_Discrete_MCRegisterAddress {
334 uint16_t s0;
335 uint16_t s1;
336};
337
338typedef struct SMU72_Discrete_MCRegisterAddress SMU72_Discrete_MCRegisterAddress;
339
340struct SMU72_Discrete_MCRegisterSet {
341 uint32_t value[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
342};
343
344typedef struct SMU72_Discrete_MCRegisterSet SMU72_Discrete_MCRegisterSet;
345
346struct SMU72_Discrete_MCRegisters {
347 uint8_t last;
348 uint8_t reserved[3];
349 SMU72_Discrete_MCRegisterAddress address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
350 SMU72_Discrete_MCRegisterSet data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
351};
352
353typedef struct SMU72_Discrete_MCRegisters SMU72_Discrete_MCRegisters;
354
355
356/* --------------------------------------------------- Fan Table ----------------------------------------------------------- */
357
358struct SMU72_Discrete_FanTable {
359 uint16_t FdoMode;
360 int16_t TempMin;
361 int16_t TempMed;
362 int16_t TempMax;
363 int16_t Slope1;
364 int16_t Slope2;
365 int16_t FdoMin;
366 int16_t HystUp;
367 int16_t HystDown;
368 int16_t HystSlope;
369 int16_t TempRespLim;
370 int16_t TempCurr;
371 int16_t SlopeCurr;
372 int16_t PwmCurr;
373 uint32_t RefreshPeriod;
374 int16_t FdoMax;
375 uint8_t TempSrc;
376 int8_t FanControl_GL_Flag;
377};
378
379typedef struct SMU72_Discrete_FanTable SMU72_Discrete_FanTable;
380
381#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
382#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
383
384struct SMU7_MclkDpmScoreboard {
385
386 uint32_t PercentageBusy;
387
388 int32_t PIDError;
389 int32_t PIDIntegral;
390 int32_t PIDOutput;
391
392 uint32_t SigmaDeltaAccum;
393 uint32_t SigmaDeltaOutput;
394 uint32_t SigmaDeltaLevel;
395
396 uint32_t UtilizationSetpoint;
397
398 uint8_t TdpClampMode;
399 uint8_t TdcClampMode;
400 uint8_t ThermClampMode;
401 uint8_t VoltageBusy;
402
403 int8_t CurrLevel;
404 int8_t TargLevel;
405 uint8_t LevelChangeInProgress;
406 uint8_t UpHyst;
407
408 uint8_t DownHyst;
409 uint8_t VoltageDownHyst;
410 uint8_t DpmEnable;
411 uint8_t DpmRunning;
412
413 uint8_t DpmForce;
414 uint8_t DpmForceLevel;
415 uint8_t DisplayWatermark;
416 uint8_t McArbIndex;
417
418 uint32_t MinimumPerfMclk;
419
420 uint8_t AcpiReq;
421 uint8_t AcpiAck;
422 uint8_t MclkSwitchInProgress;
423 uint8_t MclkSwitchCritical;
424
425 uint8_t IgnoreVBlank;
426 uint8_t TargetMclkIndex;
427 uint8_t TargetMvddIndex;
428 uint8_t MclkSwitchResult;
429
430 uint16_t VbiFailureCount;
431 uint8_t VbiWaitCounter;
432 uint8_t EnabledLevelsChange;
433
434 uint16_t LevelResidencyCountersN[SMU72_MAX_LEVELS_MEMORY];
435 uint16_t LevelSwitchCounters[SMU72_MAX_LEVELS_MEMORY];
436
437 void (*TargetStateCalculator)(uint8_t);
438 void (*SavedTargetStateCalculator)(uint8_t);
439
440 uint16_t AutoDpmInterval;
441 uint16_t AutoDpmRange;
442
443 uint16_t VbiTimeoutCount;
444 uint16_t MclkSwitchingTime;
445
446 uint8_t fastSwitch;
447 uint8_t Save_PIC_VDDGFX_EXIT;
448 uint8_t Save_PIC_VDDGFX_ENTER;
449 uint8_t padding;
450
451};
452
453typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
454
455struct SMU7_UlvScoreboard {
456 uint8_t EnterUlv;
457 uint8_t ExitUlv;
458 uint8_t UlvActive;
459 uint8_t WaitingForUlv;
460 uint8_t UlvEnable;
461 uint8_t UlvRunning;
462 uint8_t UlvMasterEnable;
463 uint8_t padding;
464 uint32_t UlvAbortedCount;
465 uint32_t UlvTimeStamp;
466};
467
468typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
469
470struct VddgfxSavedRegisters {
471 uint32_t GPU_DBG[3];
472 uint32_t MEC_BaseAddress_Hi;
473 uint32_t MEC_BaseAddress_Lo;
474 uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
475 uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
476 uint32_t CP_INT_CNTL;
477};
478
479typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
480
481struct SMU7_VddGfxScoreboard {
482 uint8_t VddGfxEnable;
483 uint8_t VddGfxActive;
484 uint8_t VPUResetOccured;
485 uint8_t padding;
486
487 uint32_t VddGfxEnteredCount;
488 uint32_t VddGfxAbortedCount;
489
490 uint32_t VddGfxVid;
491
492 VddgfxSavedRegisters SavedRegisters;
493};
494
495typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;
496
497struct SMU7_TdcLimitScoreboard {
498 uint8_t Enable;
499 uint8_t Running;
500 uint16_t Alpha;
501 uint32_t FilteredIddc;
502 uint32_t IddcLimit;
503 uint32_t IddcHyst;
504 SMU7_HystController_Data HystControllerData;
505};
506
507typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
508
509struct SMU7_PkgPwrLimitScoreboard {
510 uint8_t Enable;
511 uint8_t Running;
512 uint16_t Alpha;
513 uint32_t FilteredPkgPwr;
514 uint32_t Limit;
515 uint32_t Hyst;
516 uint32_t LimitFromDriver;
517 SMU7_HystController_Data HystControllerData;
518};
519
520typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;
521
522struct SMU7_BapmScoreboard {
523 uint32_t source_powers[SMU72_DTE_SOURCES];
524 uint32_t source_powers_last[SMU72_DTE_SOURCES];
525 int32_t entity_temperatures[SMU72_NUM_GPU_TES];
526 int32_t initial_entity_temperatures[SMU72_NUM_GPU_TES];
527 int32_t Limit;
528 int32_t Hyst;
529 int32_t therm_influence_coeff_table[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS * 2];
530 int32_t therm_node_table[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
531 uint16_t ConfigTDPPowerScalar;
532 uint16_t FanSpeedPowerScalar;
533 uint16_t OverDrivePowerScalar;
534 uint16_t OverDriveLimitScalar;
535 uint16_t FinalPowerScalar;
536 uint8_t VariantID;
537 uint8_t spare997;
538
539 SMU7_HystController_Data HystControllerData;
540
541 int32_t temperature_gradient_slope;
542 int32_t temperature_gradient;
543 uint32_t measured_temperature;
544};
545
546
547typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
548
549struct SMU7_AcpiScoreboard {
550 uint32_t SavedInterruptMask[2];
551 uint8_t LastACPIRequest;
552 uint8_t CgBifResp;
553 uint8_t RequestType;
554 uint8_t Padding;
555 SMU72_Discrete_ACPILevel D0Level;
556};
557
558typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
559
560struct SMU72_Discrete_PmFuses {
561 /* dw1 */
562 uint8_t SviLoadLineEn;
563 uint8_t SviLoadLineVddC;
564 uint8_t SviLoadLineTrimVddC;
565 uint8_t SviLoadLineOffsetVddC;
566
567 /* dw2 */
568 uint16_t TDC_VDDC_PkgLimit;
569 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
570 uint8_t TDC_MAWt;
571
572 /* dw3 */
573 uint8_t TdcWaterfallCtl;
574 uint8_t LPMLTemperatureMin;
575 uint8_t LPMLTemperatureMax;
576 uint8_t Reserved;
577
578 /* dw4-dw7 */
579 uint8_t LPMLTemperatureScaler[16];
580
581 /* dw8-dw9 */
582 int16_t FuzzyFan_ErrorSetDelta;
583 int16_t FuzzyFan_ErrorRateSetDelta;
584 int16_t FuzzyFan_PwmSetDelta;
585 uint16_t Reserved6;
586
587 /* dw10-dw14 */
588 uint8_t GnbLPML[16];
589
590 /* dw15 */
591 uint8_t GnbLPMLMaxVid;
592 uint8_t GnbLPMLMinVid;
593 uint8_t Reserved1[2];
594
595 /* dw16 */
596 uint16_t BapmVddCBaseLeakageHiSidd;
597 uint16_t BapmVddCBaseLeakageLoSidd;
598};
599
600typedef struct SMU72_Discrete_PmFuses SMU72_Discrete_PmFuses;
601
602struct SMU7_Discrete_Log_Header_Table {
603 uint32_t version;
604 uint32_t asic_id;
605 uint16_t flags;
606 uint16_t entry_size;
607 uint32_t total_size;
608 uint32_t num_of_entries;
609 uint8_t type;
610 uint8_t mode;
611 uint8_t filler_0[2];
612 uint32_t filler_1[2];
613};
614
615typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;
616
617struct SMU7_Discrete_Log_Cntl {
618 uint8_t Enabled;
619 uint8_t Type;
620 uint8_t padding[2];
621 uint32_t BufferSize;
622 uint32_t SamplesLogged;
623 uint32_t SampleSize;
624 uint32_t AddrL;
625 uint32_t AddrH;
626};
627
628typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
629
630#define CAC_ACC_NW_NUM_OF_SIGNALS 87
631
632struct SMU7_Discrete_Cac_Collection_Table {
633 uint32_t temperature;
634 uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
635};
636
637typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;
638
639struct SMU7_Discrete_Cac_Verification_Table {
640 uint32_t VddcTotalPower;
641 uint32_t VddcLeakagePower;
642 uint32_t VddcConstantPower;
643 uint32_t VddcGfxDynamicPower;
644 uint32_t VddcUvdDynamicPower;
645 uint32_t VddcVceDynamicPower;
646 uint32_t VddcAcpDynamicPower;
647 uint32_t VddcPcieDynamicPower;
648 uint32_t VddcDceDynamicPower;
649 uint32_t VddcCurrent;
650 uint32_t VddcVoltage;
651 uint32_t VddciTotalPower;
652 uint32_t VddciLeakagePower;
653 uint32_t VddciConstantPower;
654 uint32_t VddciDynamicPower;
655 uint32_t Vddr1TotalPower;
656 uint32_t Vddr1LeakagePower;
657 uint32_t Vddr1ConstantPower;
658 uint32_t Vddr1DynamicPower;
659 uint32_t spare[4];
660 uint32_t temperature;
661};
662
663typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
664
665struct SMU7_Discrete_Pm_Status_Table {
666 /* Thermal entities */
667 int32_t T_meas_max;
668 int32_t T_meas_acc;
669 int32_t T_calc_max;
670 int32_t T_calc_acc;
671 uint32_t P_scalar_acc;
672 uint32_t P_calc_max;
673 uint32_t P_calc_acc;
674
675 /*Voltage domains */
676 uint32_t I_calc_max;
677 uint32_t I_calc_acc;
678 uint32_t I_calc_acc_vddci;
679 uint32_t V_calc_noload_acc;
680 uint32_t V_calc_load_acc;
681 uint32_t V_calc_noload_acc_vddci;
682 uint32_t P_meas_acc;
683 uint32_t V_meas_noload_acc;
684 uint32_t V_meas_load_acc;
685 uint32_t I_meas_acc;
686 uint32_t P_meas_acc_vddci;
687 uint32_t V_meas_noload_acc_vddci;
688 uint32_t V_meas_load_acc_vddci;
689 uint32_t I_meas_acc_vddci;
690
691 /*Frequency */
692 uint16_t Sclk_dpm_residency[8];
693 uint16_t Uvd_dpm_residency[8];
694 uint16_t Vce_dpm_residency[8];
695 uint16_t Mclk_dpm_residency[4];
696
697 /*Chip */
698 uint32_t P_vddci_acc;
699 uint32_t P_vddr1_acc;
700 uint32_t P_nte1_acc;
701 uint32_t PkgPwr_max;
702 uint32_t PkgPwr_acc;
703 uint32_t MclkSwitchingTime_max;
704 uint32_t MclkSwitchingTime_acc;
705 uint32_t FanPwm_acc;
706 uint32_t FanRpm_acc;
707
708 uint32_t AccCnt;
709};
710
711typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
712
713/*FIXME THESE NEED TO BE UPDATED */
714#define SMU7_SCLK_CAC 0x561
715#define SMU7_MCLK_CAC 0xF9
716#define SMU7_VCLK_CAC 0x2DE
717#define SMU7_DCLK_CAC 0x2DE
718#define SMU7_ECLK_CAC 0x25E
719#define SMU7_ACLK_CAC 0x25E
720#define SMU7_SAMCLK_CAC 0x25E
721#define SMU7_DISPCLK_CAC 0x100
722#define SMU7_CAC_CONSTANT 0x2EE3430
723#define SMU7_CAC_CONSTANT_SHIFT 18
724
725#define SMU7_VDDCI_MCLK_CONST 1765
726#define SMU7_VDDCI_MCLK_CONST_SHIFT 16
727#define SMU7_VDDCI_VDDCI_CONST 50958
728#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
729#define SMU7_VDDCI_CONST 11781
730
731#define SMU7_12C_VDDCI_MCLK_CONST 1623
732#define SMU7_12C_VDDCI_MCLK_CONST_SHIFT 15
733#define SMU7_12C_VDDCI_VDDCI_CONST 40088
734#define SMU7_12C_VDDCI_VDDCI_CONST_SHIFT 13
735#define SMU7_12C_VDDCI_CONST 20856
736
737#define SMU7_VDDCI_STROBE_PWR 1331
738
739#define SMU7_VDDR1_CONST 693
740#define SMU7_VDDR1_CAC_WEIGHT 20
741#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
742#define SMU7_VDDR1_STROBE_PWR 512
743
744#define SMU7_AREA_COEFF_UVD 0xA78
745#define SMU7_AREA_COEFF_VCE 0x190A
746#define SMU7_AREA_COEFF_ACP 0x22D1
747#define SMU7_AREA_COEFF_SAMU 0x534
748
749/*ThermOutMode values */
750#define SMU7_THERM_OUT_MODE_DISABLE 0x0
751#define SMU7_THERM_OUT_MODE_THERM_ONLY 0x1
752#define SMU7_THERM_OUT_MODE_THERM_VRHOT 0x2
753
754#if !defined(SMC_MICROCODE)
755#pragma pack(pop)
756#endif
757
758
759#endif
760
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu73.h b/drivers/gpu/drm/amd/powerplay/inc/smu73.h
new file mode 100644
index 000000000000..c6b12a4c00db
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu73.h
@@ -0,0 +1,720 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _SMU73_H_
24#define _SMU73_H_
25
26#pragma pack(push, 1)
27enum SID_OPTION {
28 SID_OPTION_HI,
29 SID_OPTION_LO,
30 SID_OPTION_COUNT
31};
32
33enum Poly3rdOrderCoeff {
34 LEAKAGE_TEMPERATURE_SCALAR,
35 LEAKAGE_VOLTAGE_SCALAR,
36 DYNAMIC_VOLTAGE_SCALAR,
37 POLY_3RD_ORDER_COUNT
38};
39
40struct SMU7_Poly3rdOrder_Data
41{
42 int32_t a;
43 int32_t b;
44 int32_t c;
45 int32_t d;
46 uint8_t a_shift;
47 uint8_t b_shift;
48 uint8_t c_shift;
49 uint8_t x_shift;
50};
51
52typedef struct SMU7_Poly3rdOrder_Data SMU7_Poly3rdOrder_Data;
53
54struct Power_Calculator_Data
55{
56 uint16_t NoLoadVoltage;
57 uint16_t LoadVoltage;
58 uint16_t Resistance;
59 uint16_t Temperature;
60 uint16_t BaseLeakage;
61 uint16_t LkgTempScalar;
62 uint16_t LkgVoltScalar;
63 uint16_t LkgAreaScalar;
64 uint16_t LkgPower;
65 uint16_t DynVoltScalar;
66 uint32_t Cac;
67 uint32_t DynPower;
68 uint32_t TotalCurrent;
69 uint32_t TotalPower;
70};
71
72typedef struct Power_Calculator_Data PowerCalculatorData_t;
73
74struct Gc_Cac_Weight_Data
75{
76 uint8_t index;
77 uint32_t value;
78};
79
80typedef struct Gc_Cac_Weight_Data GcCacWeight_Data;
81
82
83typedef struct {
84 uint32_t high;
85 uint32_t low;
86} data_64_t;
87
88typedef struct {
89 data_64_t high;
90 data_64_t low;
91} data_128_t;
92
93#define SMU__NUM_SCLK_DPM_STATE 8
94#define SMU__NUM_MCLK_DPM_LEVELS 4
95#define SMU__NUM_LCLK_DPM_LEVELS 8
96#define SMU__NUM_PCIE_DPM_LEVELS 8
97
98#define SMU7_CONTEXT_ID_SMC 1
99#define SMU7_CONTEXT_ID_VBIOS 2
100
101#define SMU73_MAX_LEVELS_VDDC 16
102#define SMU73_MAX_LEVELS_VDDGFX 16
103#define SMU73_MAX_LEVELS_VDDCI 8
104#define SMU73_MAX_LEVELS_MVDD 4
105
106#define SMU_MAX_SMIO_LEVELS 4
107
108#define SMU73_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV
109#define SMU73_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM
110#define SMU73_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels
111#define SMU73_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes.
112#define SMU73_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD.
113#define SMU73_MAX_LEVELS_VCE 8 // ECLK levels for VCE.
114#define SMU73_MAX_LEVELS_ACP 8 // ACLK levels for ACP.
115#define SMU73_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU.
116#define SMU73_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table.
117
118#define DPM_NO_LIMIT 0
119#define DPM_NO_UP 1
120#define DPM_GO_DOWN 2
121#define DPM_GO_UP 3
122
123#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
124#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
125
126#define GPIO_CLAMP_MODE_VRHOT 1
127#define GPIO_CLAMP_MODE_THERM 2
128#define GPIO_CLAMP_MODE_DC 4
129
130#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
131#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
132#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
133#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
134#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
135#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
136#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
137#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
138#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
139#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
140#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
141#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
142#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
143#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
144#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
145#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
146#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
147#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
148#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
149#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
150
151// Virtualization Defines
152#define CG_XDMA_MASK 0x1
153#define CG_XDMA_SHIFT 0
154#define CG_UVD_MASK 0x2
155#define CG_UVD_SHIFT 1
156#define CG_VCE_MASK 0x4
157#define CG_VCE_SHIFT 2
158#define CG_SAMU_MASK 0x8
159#define CG_SAMU_SHIFT 3
160#define CG_GFX_MASK 0x10
161#define CG_GFX_SHIFT 4
162#define CG_SDMA_MASK 0x20
163#define CG_SDMA_SHIFT 5
164#define CG_HDP_MASK 0x40
165#define CG_HDP_SHIFT 6
166#define CG_MC_MASK 0x80
167#define CG_MC_SHIFT 7
168#define CG_DRM_MASK 0x100
169#define CG_DRM_SHIFT 8
170#define CG_ROM_MASK 0x200
171#define CG_ROM_SHIFT 9
172#define CG_BIF_MASK 0x400
173#define CG_BIF_SHIFT 10
174
175#define SMU73_DTE_ITERATIONS 5
176#define SMU73_DTE_SOURCES 3
177#define SMU73_DTE_SINKS 1
178#define SMU73_NUM_CPU_TES 0
179#define SMU73_NUM_GPU_TES 1
180#define SMU73_NUM_NON_TES 2
181#define SMU73_DTE_FAN_SCALAR_MIN 0x100
182#define SMU73_DTE_FAN_SCALAR_MAX 0x166
183#define SMU73_DTE_FAN_TEMP_MAX 93
184#define SMU73_DTE_FAN_TEMP_MIN 83
185
186#define SMU73_THERMAL_INPUT_LOOP_COUNT 6
187#define SMU73_THERMAL_CLAMP_MODE_COUNT 8
188
189
190struct SMU7_HystController_Data
191{
192 uint16_t waterfall_up;
193 uint16_t waterfall_down;
194 uint16_t waterfall_limit;
195 uint16_t release_cnt;
196 uint16_t release_limit;
197 uint16_t spare;
198};
199
200typedef struct SMU7_HystController_Data SMU7_HystController_Data;
201
202struct SMU73_PIDController
203{
204 uint32_t Ki;
205 int32_t LFWindupUpperLim;
206 int32_t LFWindupLowerLim;
207 uint32_t StatePrecision;
208
209 uint32_t LfPrecision;
210 uint32_t LfOffset;
211 uint32_t MaxState;
212 uint32_t MaxLfFraction;
213 uint32_t StateShift;
214};
215
216typedef struct SMU73_PIDController SMU73_PIDController;
217
218struct SMU7_LocalDpmScoreboard
219{
220 uint32_t PercentageBusy;
221
222 int32_t PIDError;
223 int32_t PIDIntegral;
224 int32_t PIDOutput;
225
226 uint32_t SigmaDeltaAccum;
227 uint32_t SigmaDeltaOutput;
228 uint32_t SigmaDeltaLevel;
229
230 uint32_t UtilizationSetpoint;
231
232 uint8_t TdpClampMode;
233 uint8_t TdcClampMode;
234 uint8_t ThermClampMode;
235 uint8_t VoltageBusy;
236
237 int8_t CurrLevel;
238 int8_t TargLevel;
239 uint8_t LevelChangeInProgress;
240 uint8_t UpHyst;
241
242 uint8_t DownHyst;
243 uint8_t VoltageDownHyst;
244 uint8_t DpmEnable;
245 uint8_t DpmRunning;
246
247 uint8_t DpmForce;
248 uint8_t DpmForceLevel;
249 uint8_t DisplayWatermark;
250 uint8_t McArbIndex;
251
252 uint32_t MinimumPerfSclk;
253
254 uint8_t AcpiReq;
255 uint8_t AcpiAck;
256 uint8_t GfxClkSlow;
257 uint8_t GpioClampMode;
258
259 uint8_t spare2;
260 uint8_t EnabledLevelsChange;
261 uint8_t DteClampMode;
262 uint8_t FpsClampMode;
263
264 uint16_t LevelResidencyCounters [SMU73_MAX_LEVELS_GRAPHICS];
265 uint16_t LevelSwitchCounters [SMU73_MAX_LEVELS_GRAPHICS];
266
267 void (*TargetStateCalculator)(uint8_t);
268 void (*SavedTargetStateCalculator)(uint8_t);
269
270 uint16_t AutoDpmInterval;
271 uint16_t AutoDpmRange;
272
273 uint8_t FpsEnabled;
274 uint8_t MaxPerfLevel;
275 uint8_t AllowLowClkInterruptToHost;
276 uint8_t FpsRunning;
277
278 uint32_t MaxAllowedFrequency;
279
280 uint32_t FilteredSclkFrequency;
281 uint32_t LastSclkFrequency;
282 uint32_t FilteredSclkFrequencyCnt;
283
284 uint8_t LedEnable;
285 uint8_t LedPin0;
286 uint8_t LedPin1;
287 uint8_t LedPin2;
288 uint32_t LedAndMask;
289
290 uint16_t FpsAlpha;
291 uint16_t DeltaTime;
292 uint32_t CurrentFps;
293 uint32_t FilteredFps;
294 uint32_t FrameCount;
295 uint32_t FrameCountLast;
296 uint16_t FpsTargetScalar;
297 uint16_t FpsWaterfallLimitScalar;
298 uint16_t FpsAlphaScalar;
299 uint16_t spare8;
300 SMU7_HystController_Data HystControllerData;
301};
302
303typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
304
305#define SMU7_MAX_VOLTAGE_CLIENTS 12
306
307typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
308
309#define VDDC_MASK 0x00007FFF
310#define VDDC_SHIFT 0
311#define VDDCI_MASK 0x3FFF8000
312#define VDDCI_SHIFT 15
313#define PHASES_MASK 0xC0000000
314#define PHASES_SHIFT 30
315
316typedef uint32_t SMU_VoltageLevel;
317
318struct SMU7_VoltageScoreboard
319{
320 SMU_VoltageLevel TargetVoltage;
321 uint16_t MaxVid;
322 uint8_t HighestVidOffset;
323 uint8_t CurrentVidOffset;
324
325 uint16_t CurrentVddc;
326 uint16_t CurrentVddci;
327
328
329 uint8_t ControllerBusy;
330 uint8_t CurrentVid;
331 uint8_t CurrentVddciVid;
332 uint8_t padding;
333
334 SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
335 SMU_VoltageLevel TargetVoltageState;
336 uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
337
338 uint8_t padding2;
339 uint8_t padding3;
340 uint8_t ControllerEnable;
341 uint8_t ControllerRunning;
342 uint16_t CurrentStdVoltageHiSidd;
343 uint16_t CurrentStdVoltageLoSidd;
344 uint8_t OverrideVoltage;
345 uint8_t padding4;
346 uint8_t padding5;
347 uint8_t CurrentPhases;
348
349 VoltageChangeHandler_t ChangeVddc;
350
351 VoltageChangeHandler_t ChangeVddci;
352 VoltageChangeHandler_t ChangePhase;
353 VoltageChangeHandler_t ChangeMvdd;
354
355 VoltageChangeHandler_t functionLinks[6];
356
357 uint16_t * VddcFollower1;
358
359 int16_t Driver_OD_RequestedVidOffset1;
360 int16_t Driver_OD_RequestedVidOffset2;
361
362};
363
364typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
365
366// -------------------------------------------------------------------------------------------------------------------------
367#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
368
369struct SMU7_PCIeLinkSpeedScoreboard
370{
371 uint8_t DpmEnable;
372 uint8_t DpmRunning;
373 uint8_t DpmForce;
374 uint8_t DpmForceLevel;
375
376 uint8_t CurrentLinkSpeed;
377 uint8_t EnabledLevelsChange;
378 uint16_t AutoDpmInterval;
379
380 uint16_t AutoDpmRange;
381 uint16_t AutoDpmCount;
382
383 uint8_t DpmMode;
384 uint8_t AcpiReq;
385 uint8_t AcpiAck;
386 uint8_t CurrentLinkLevel;
387
388};
389
390typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
391
392// -------------------------------------------------------- CAC table ------------------------------------------------------
393#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
394#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
395
396#define SMU7_SCALE_I 7
397#define SMU7_SCALE_R 12
398
399struct SMU7_PowerScoreboard
400{
401 uint32_t GpuPower;
402
403 uint32_t VddcPower;
404 uint32_t VddcVoltage;
405 uint32_t VddcCurrent;
406
407 uint32_t MvddPower;
408 uint32_t MvddVoltage;
409 uint32_t MvddCurrent;
410
411 uint32_t RocPower;
412
413 uint16_t Telemetry_1_slope;
414 uint16_t Telemetry_2_slope;
415 int32_t Telemetry_1_offset;
416 int32_t Telemetry_2_offset;
417};
418typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
419
// For FeatureEnables (SMU73_SoftRegisters.FeatureEnables):
// one bit per DPM/controller feature the driver asks firmware to run.
#define SMU7_SCLK_DPM_CONFIG_MASK                        0x01
#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK              0x02
#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK              0x04
#define SMU7_MCLK_DPM_CONFIG_MASK                        0x08
#define SMU7_UVD_DPM_CONFIG_MASK                         0x10
#define SMU7_VCE_DPM_CONFIG_MASK                         0x20
#define SMU7_ACP_DPM_CONFIG_MASK                         0x40
#define SMU7_SAMU_DPM_CONFIG_MASK                        0x80
#define SMU7_PCIEGEN_DPM_CONFIG_MASK                    0x100

// For HandshakeDisables (SMU73_SoftRegisters.HandshakeDisables):
// each bit disables one block's clock-switch handshake with MCLK/SCLK.
#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE                  0x00000001
#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE                  0x00000002
#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE                  0x00000100
#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE                  0x00000200
#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
437
// All 'soft registers' should be uint32_t.
// Driver-writable parameter/status area in SMU memory ("soft registers").
// Located via SMU73_Firmware_Header.SoftRegisters. Packed firmware ABI —
// field order and widths must not change.
struct SMU73_SoftRegisters
{
    uint32_t        RefClockFrequency;          // reference clock, units per firmware convention — TODO confirm
    uint32_t        PmTimerPeriod;
    uint32_t        FeatureEnables;             // SMU7_*_CONFIG_MASK bits above

    uint32_t        PreVBlankGap;               // display/VBlank timing parameters for MCLK switching
    uint32_t        VBlankTimeout;
    uint32_t        TrainTimeGap;

    uint32_t        MvddSwitchTime;
    uint32_t        LongestAcpiTrainTime;
    uint32_t        AcpiDelay;
    uint32_t        G5TrainTime;
    uint32_t        DelayMpllPwron;
    uint32_t        VoltageChangeTimeout;

    uint32_t        HandshakeDisables;          // SMU7_*_HANDSHAKE_DISABLE bits above

    // Per-PHY display configuration bytes (enum DisplayConfig values — verify against caller).
    uint8_t         DisplayPhy1Config;
    uint8_t         DisplayPhy2Config;
    uint8_t         DisplayPhy3Config;
    uint8_t         DisplayPhy4Config;

    uint8_t         DisplayPhy5Config;
    uint8_t         DisplayPhy6Config;
    uint8_t         DisplayPhy7Config;
    uint8_t         DisplayPhy8Config;

    uint32_t        AverageGraphicsActivity;    // firmware-reported average activity counters
    uint32_t        AverageMemoryActivity;
    uint32_t        AverageGioActivity;

    // Bitmask of enabled levels per DPM domain.
    uint8_t         SClkDpmEnabledLevels;
    uint8_t         MClkDpmEnabledLevels;
    uint8_t         LClkDpmEnabledLevels;
    uint8_t         PCIeDpmEnabledLevels;

    uint8_t         UVDDpmEnabledLevels;
    uint8_t         SAMUDpmEnabledLevels;
    uint8_t         ACPDpmEnabledLevels;
    uint8_t         VCEDpmEnabledLevels;

    // DRAM logging buffer addresses (virtual and physical, hi/lo halves) and size.
    uint32_t        DRAM_LOG_ADDR_H;
    uint32_t        DRAM_LOG_ADDR_L;
    uint32_t        DRAM_LOG_PHY_ADDR_H;
    uint32_t        DRAM_LOG_PHY_ADDR_L;
    uint32_t        DRAM_LOG_BUFF_SIZE;
    uint32_t        UlvEnterCount;              // ULV (ultra-low voltage) statistics
    uint32_t        UlvTime;
    uint32_t        UcodeLoadStatus;
    uint32_t        Reserved[2];

};

typedef struct SMU73_SoftRegisters SMU73_SoftRegisters;
495
// Header placed at SMU7_FIRMWARE_HEADER_LOCATION in SMU memory.
// The uint32_t table members below are offsets/pointers (SMU address space)
// to the corresponding data tables — presumably byte offsets; verify against
// the loader code that consumes this header.
struct SMU73_Firmware_Header
{
    uint32_t Digest[5];             // firmware image digest (20 bytes)
    uint32_t Version;
    uint32_t HeaderSize;
    uint32_t Flags;
    uint32_t EntryPoint;
    uint32_t CodeSize;
    uint32_t ImageSize;

    uint32_t Rtos;
    uint32_t SoftRegisters;         // -> SMU73_SoftRegisters
    uint32_t DpmTable;              // -> SMU73_Discrete_DpmTable
    uint32_t FanTable;              // -> SMU73_Discrete_FanTable
    uint32_t CacConfigTable;
    uint32_t CacStatusTable;


    uint32_t mcRegisterTable;


    uint32_t mcArbDramTimingTable;  // -> SMU73_Discrete_MCArbDramTimingTable




    uint32_t PmFuseTable;           // -> SMU73_Discrete_PmFuses
    uint32_t Globals;
    uint32_t ClockStretcherTable;
    uint32_t Reserved[41];
    uint32_t Signature;
};

typedef struct SMU73_Firmware_Header SMU73_Firmware_Header;

// Fixed location of the firmware header in SMU address space.
#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
532
// Per-PHY display configuration values (used by the DisplayPhyNConfig bytes
// in SMU73_SoftRegisters). Encodes link type, rate and lane count;
// 0 is not used — enumeration deliberately starts at 1 (PowerDown).
enum DisplayConfig {
    PowerDown = 1,
    DP54x4,
    DP54x2,
    DP54x1,
    DP27x4,
    DP27x2,
    DP27x1,
    HDMI297,
    HDMI162,
    LVDS,
    DP324x4,
    DP324x2,
    DP324x1
};
548
549
// Number of local-CAC (capacitance/leakage) monitor blocks per IP group.
#define MC_BLOCK_COUNT 1
#define CPL_BLOCK_COUNT 5
#define SE_BLOCK_COUNT 15
#define GC_BLOCK_COUNT 24

// One local CAC monitor entry (packed firmware ABI).
struct SMU7_Local_Cac {
    uint8_t BlockId;        // which hardware block this entry monitors
    uint8_t SignalId;
    uint8_t Threshold;
    uint8_t Padding;
};

typedef struct SMU7_Local_Cac SMU7_Local_Cac;

// Full local-CAC configuration: one array per IP group, sizes fixed by the
// *_BLOCK_COUNT constants above.
struct SMU7_Local_Cac_Table {

    SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
    SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
    SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
    SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
};

typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
573
574#if !defined(SMC_MICROCODE)
575#pragma pack(pop)
576#endif
577
// Description of Clock Gating bitmask for Tonga:
// Each feature has a bit SHIFT and the corresponding single-bit MASK
// (MASK == 1 << SHIFT for every pair below).
// System Clock Gating
#define CG_SYS_BITMASK_FIRST_BIT      0  // First bit of Sys CG bitmask
#define CG_SYS_BITMASK_LAST_BIT       9  // Last bit of Sys CG bitmask
#define CG_SYS_BIF_MGLS_SHIFT         0
#define CG_SYS_ROM_SHIFT              1
#define CG_SYS_MC_MGCG_SHIFT          2
#define CG_SYS_MC_MGLS_SHIFT          3
#define CG_SYS_SDMA_MGCG_SHIFT        4
#define CG_SYS_SDMA_MGLS_SHIFT        5
#define CG_SYS_DRM_MGCG_SHIFT         6
#define CG_SYS_HDP_MGCG_SHIFT         7
#define CG_SYS_HDP_MGLS_SHIFT         8
#define CG_SYS_DRM_MGLS_SHIFT         9

#define CG_SYS_BIF_MGLS_MASK          0x1
#define CG_SYS_ROM_MASK               0x2
#define CG_SYS_MC_MGCG_MASK           0x4
#define CG_SYS_MC_MGLS_MASK           0x8
#define CG_SYS_SDMA_MGCG_MASK         0x10
#define CG_SYS_SDMA_MGLS_MASK         0x20
#define CG_SYS_DRM_MGCG_MASK          0x40
#define CG_SYS_HDP_MGCG_MASK          0x80
#define CG_SYS_HDP_MGLS_MASK          0x100
#define CG_SYS_DRM_MGLS_MASK          0x200

// Graphics Clock Gating (occupies bits 16..20 of the same bitmask)
#define CG_GFX_BITMASK_FIRST_BIT      16 // First bit of Gfx CG bitmask
#define CG_GFX_BITMASK_LAST_BIT       20 // Last bit of Gfx CG bitmask
#define CG_GFX_CGCG_SHIFT             16
#define CG_GFX_CGLS_SHIFT             17
#define CG_CPF_MGCG_SHIFT             18
#define CG_RLC_MGCG_SHIFT             19
#define CG_GFX_OTHERS_MGCG_SHIFT      20

#define CG_GFX_CGCG_MASK              0x00010000
#define CG_GFX_CGLS_MASK              0x00020000
#define CG_CPF_MGCG_MASK              0x00040000
#define CG_RLC_MGCG_MASK              0x00080000
#define CG_GFX_OTHERS_MGCG_MASK       0x00100000
618
619
620
// Voltage Regulator Configuration
// VR Config info is contained in dpmTable.VRConfig
// One byte per rail (VDDC / VDDGFX / VDDCI / MVDD); each byte holds one of
// the VR_* control-mode values defined below.

#define VRCONF_VDDC_MASK         0x000000FF
#define VRCONF_VDDC_SHIFT        0
#define VRCONF_VDDGFX_MASK       0x0000FF00
#define VRCONF_VDDGFX_SHIFT      8
#define VRCONF_VDDCI_MASK        0x00FF0000
#define VRCONF_VDDCI_SHIFT       16
#define VRCONF_MVDD_MASK         0xFF000000
#define VRCONF_MVDD_SHIFT        24

// Per-rail voltage control modes.
#define VR_MERGED_WITH_VDDC      0
#define VR_SVI2_PLANE_1          1
#define VR_SVI2_PLANE_2          2
#define VR_SMIO_PATTERN_1        3
#define VR_SMIO_PATTERN_2        4
#define VR_STATIC_VOLTAGE        5

// Clock Stretcher Configuration

#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
#define CKS_LOOKUPTable_MAX_ENTRIES 0x4

// The 'settings' field is subdivided in the following way:
// bit 0: DDT, bits 1..4: stretch amount, bit 7: enable.
#define CLOCK_STRETCHER_SETTING_DDT_MASK             0x01
#define CLOCK_STRETCHER_SETTING_DDT_SHIFT            0x0
#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK  0x1E
#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
#define CLOCK_STRETCHER_SETTING_ENABLE_MASK          0x80
#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT         0x7
652
// One clock-stretcher rule: applies 'setting' (see the
// CLOCK_STRETCHER_SETTING_* masks above) over a VID range.
struct SMU_ClockStretcherDataTableEntry {
    uint8_t minVID;
    uint8_t maxVID;


    uint16_t setting;
};
typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;

struct SMU_ClockStretcherDataTable {
    SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
};
typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;

// CKS lookup rule: applies 'setting' over a frequency range
// (units of minFreq/maxFreq per firmware convention — TODO confirm).
struct SMU_CKS_LOOKUPTableEntry {
    uint16_t minFreq;
    uint16_t maxFreq;

    uint8_t setting;
    uint8_t padding[3];
};
typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;

struct SMU_CKS_LOOKUPTable {
    SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
};
typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;

// Average/minimum PSM counter samples (28 entries each — meaning of the
// index is not visible here; verify against the AVFS code that fills this).
struct AgmAvfsData_t {
    uint16_t avgPsmCount[28];
    uint16_t minPsmCount[28];
};
typedef struct AgmAvfsData_t AgmAvfsData_t;
686
// AVFS DEFINES

// Column indices of the voltage/frequency/temperature (VFT) table:
// one column per SCLK operating point, NUM_VFT_COLUMNS is the count.
enum VFT_COLUMNS {
    SCLK0,
    SCLK1,
    SCLK2,
    SCLK3,
    SCLK4,
    SCLK5,
    SCLK6,
    SCLK7,

    NUM_VFT_COLUMNS
};

// Maximum number of temperature rows in the VFT table.
#define TEMP_RANGE_MAXSTEPS 12
// A single VFT cell: the voltage for one (temperature, SCLK) pair.
struct VFT_CELL_t {
    uint16_t Voltage;
};

typedef struct VFT_CELL_t VFT_CELL_t;

// AVFS voltage/frequency/temperature table: Cell[temp][sclk] voltages plus
// per-column AVFS/BTC adjustments and the temperature axis values.
struct VFT_TABLE_t {
    VFT_CELL_t    Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
    uint16_t      AvfsGbv [NUM_VFT_COLUMNS];
    uint16_t      BtcGbv [NUM_VFT_COLUMNS];
    uint16_t      Temperature [TEMP_RANGE_MAXSTEPS];

    uint8_t       NumTemperatureSteps;  // number of valid rows (<= TEMP_RANGE_MAXSTEPS)
    uint8_t       padding[3];
};
typedef struct VFT_TABLE_t VFT_TABLE_t;
719
720#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h
new file mode 100644
index 000000000000..5916be08a7fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h
@@ -0,0 +1,799 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _SMU73_DISCRETE_H_
24#define _SMU73_DISCRETE_H_
25
26#include "smu73.h"
27
28#pragma pack(push, 1)
29
// One SMIO (serial/static GPIO) voltage step: the target voltage and the
// SMIO pin pattern that selects it. Packed firmware ABI (pragma pack(1)).
struct SMIO_Pattern
{
  uint16_t Voltage;
  uint8_t  Smio;
  uint8_t  padding;
};

typedef struct SMIO_Pattern SMIO_Pattern;

// Table of SMIO voltage patterns, indexed by level.
struct SMIO_Table
{
  SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
};

typedef struct SMIO_Table SMIO_Table;
45
// One graphics (SCLK) DPM level: target frequency, voltage floor, SPLL
// programming, and controller hysteresis parameters. Packed firmware ABI —
// do not reorder or resize fields.
struct SMU73_Discrete_GraphicsLevel {
  uint32_t MinVoltage;              // minimum voltage for this level (encoding per firmware — TODO confirm)

  uint32_t SclkFrequency;

  uint8_t  pcieDpmLevel;            // PCIe DPM level paired with this SCLK level
  uint8_t  DeepSleepDivId;
  uint16_t ActivityLevel;
  uint32_t CgSpllFuncCntl3;         // SPLL register values for this frequency
  uint32_t CgSpllFuncCntl4;
  uint32_t SpllSpreadSpectrum;
  uint32_t SpllSpreadSpectrum2;
  uint32_t CcPwrDynRm;
  uint32_t CcPwrDynRm1;
  uint8_t  SclkDid;                 // SCLK divider ID
  uint8_t  DisplayWatermark;
  uint8_t  EnabledForActivity;
  uint8_t  EnabledForThrottle;
  uint8_t  UpHyst;                  // up/down switch hysteresis
  uint8_t  DownHyst;
  uint8_t  VoltageDownHyst;
  uint8_t  PowerThrottle;
};

typedef struct SMU73_Discrete_GraphicsLevel SMU73_Discrete_GraphicsLevel;
71
// SCLK state used while in the ACPI (lowest-power / idle) state.
// Packed firmware ABI.
struct SMU73_Discrete_ACPILevel {
  uint32_t Flags;
  uint32_t MinVoltage;
  uint32_t SclkFrequency;
  uint8_t  SclkDid;
  uint8_t  DisplayWatermark;
  uint8_t  DeepSleepDivId;
  uint8_t  padding;
  uint32_t CgSpllFuncCntl;          // full SPLL register set for the ACPI clock
  uint32_t CgSpllFuncCntl2;
  uint32_t CgSpllFuncCntl3;
  uint32_t CgSpllFuncCntl4;
  uint32_t SpllSpreadSpectrum;
  uint32_t SpllSpreadSpectrum2;
  uint32_t CcPwrDynRm;
  uint32_t CcPwrDynRm1;
};

typedef struct SMU73_Discrete_ACPILevel SMU73_Discrete_ACPILevel;
91
// ULV (ultra-low voltage) state parameters: VDDC offset applied on ULV
// entry plus the power-dynamic register values. Packed firmware ABI.
struct SMU73_Discrete_Ulv {
  uint32_t CcPwrDynRm;
  uint32_t CcPwrDynRm1;
  uint16_t VddcOffset;        // offset in voltage units; VddcOffsetVid is the VID form
  uint8_t  VddcOffsetVid;
  uint8_t  VddcPhase;
  uint32_t Reserved;
};

typedef struct SMU73_Discrete_Ulv SMU73_Discrete_Ulv;
102
// One memory (MCLK) DPM level: target frequency, voltage floors, and
// controller hysteresis parameters. Packed firmware ABI.
struct SMU73_Discrete_MemoryLevel
{
  uint32_t MinVoltage;
  uint32_t MinMvdd;

  uint32_t MclkFrequency;

  uint8_t  StutterEnable;       // memory self-refresh stutter mode for this level
  uint8_t  FreqRange;
  uint8_t  EnabledForThrottle;
  uint8_t  EnabledForActivity;

  uint8_t  UpHyst;              // up/down switch hysteresis
  uint8_t  DownHyst;
  uint8_t  VoltageDownHyst;
  uint8_t  padding;

  uint16_t ActivityLevel;
  uint8_t  DisplayWatermark;
  uint8_t  MclkDivider;
};

typedef struct SMU73_Discrete_MemoryLevel SMU73_Discrete_MemoryLevel;
126
// One PCIe link DPM level: generation, lane count and the activity
// thresholds for switching to/from it. Packed firmware ABI.
struct SMU73_Discrete_LinkLevel
{
  uint8_t  PcieGenSpeed;          ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3
  uint8_t  PcieLaneCount;         ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
  uint8_t  EnabledForActivity;
  uint8_t  SPC;
  uint32_t DownThreshold;
  uint32_t UpThreshold;
  uint32_t Reserved;
};

typedef struct SMU73_Discrete_LinkLevel SMU73_Discrete_LinkLevel;
139
140
// MC ARB DRAM Timing registers.
// Register values for one (SCLK state, MCLK level) combination.
struct SMU73_Discrete_MCArbDramTimingTableEntry
{
  uint32_t McArbDramTiming;
  uint32_t McArbDramTiming2;
  uint8_t  McArbBurstTime;
  uint8_t  TRRDS;
  uint8_t  TRRDL;
  uint8_t  padding;
};

typedef struct SMU73_Discrete_MCArbDramTimingTableEntry SMU73_Discrete_MCArbDramTimingTableEntry;

// Full MC arbiter timing matrix, indexed [sclk_state][mclk_level].
struct SMU73_Discrete_MCArbDramTimingTable
{
  SMU73_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
};

typedef struct SMU73_Discrete_MCArbDramTimingTable SMU73_Discrete_MCArbDramTimingTable;
160
// UVD VCLK/DCLK state (level) definition.
struct SMU73_Discrete_UvdLevel
{
  uint32_t VclkFrequency;
  uint32_t DclkFrequency;
  uint32_t MinVoltage;
  uint8_t  VclkDivider;
  uint8_t  DclkDivider;
  uint8_t  padding[2];
};

typedef struct SMU73_Discrete_UvdLevel SMU73_Discrete_UvdLevel;

// Clocks for other external blocks (VCE, ACP, SAMU).
// Single-clock level used for each of those engines.
struct SMU73_Discrete_ExtClkLevel
{
  uint32_t Frequency;
  uint32_t MinVoltage;
  uint8_t  Divider;
  uint8_t  padding[3];
};

typedef struct SMU73_Discrete_ExtClkLevel SMU73_Discrete_ExtClkLevel;
184
// Snapshot of one complete power state: every engine clock plus the table
// indices that select the matching register sets. Packed firmware ABI.
struct SMU73_Discrete_StateInfo
{
  uint32_t SclkFrequency;
  uint32_t MclkFrequency;
  uint32_t VclkFrequency;
  uint32_t DclkFrequency;
  uint32_t SamclkFrequency;
  uint32_t AclkFrequency;
  uint32_t EclkFrequency;
  uint16_t MvddVoltage;
  uint16_t padding16;
  uint8_t  DisplayWatermark;
  uint8_t  McArbIndex;            // index into the MC ARB timing table
  uint8_t  McRegIndex;
  uint8_t  SeqIndex;
  uint8_t  SclkDid;
  int8_t   SclkIndex;             // signed: negative presumably means "not set" — verify against caller
  int8_t   MclkIndex;
  uint8_t  PCIeGen;

};

typedef struct SMU73_Discrete_StateInfo SMU73_Discrete_StateInfo;
208
// Master DPM configuration table the driver uploads to SMU7.3 firmware
// (located via SMU73_Firmware_Header.DpmTable). Aggregates all per-domain
// level tables, voltage control setup, thermal limits and fan/I2C wiring.
// Packed firmware ABI — field order, widths and array sizes must not change.
struct SMU73_Discrete_DpmTable
{
  // Multi-DPM controller settings
  SMU73_PIDController GraphicsPIDController;
  SMU73_PIDController MemoryPIDController;
  SMU73_PIDController LinkPIDController;

  uint32_t SystemFlags;

  // SMIO masks for voltage and phase controls
  uint32_t VRConfig;              // per-rail VR modes, see VRCONF_* masks
  uint32_t SmioMask1;
  uint32_t SmioMask2;
  SMIO_Table SmioTable1;
  SMIO_Table SmioTable2;

  uint32_t MvddLevelCount;

  // Per-VDDC-level VID values (hi/lo SIDD leakage variants).
  uint8_t  BapmVddcVidHiSidd        [SMU73_MAX_LEVELS_VDDC];
  uint8_t  BapmVddcVidLoSidd        [SMU73_MAX_LEVELS_VDDC];
  uint8_t  BapmVddcVidHiSidd2       [SMU73_MAX_LEVELS_VDDC];

  // Number of valid entries in each level table below.
  uint8_t  GraphicsDpmLevelCount;
  uint8_t  MemoryDpmLevelCount;
  uint8_t  LinkLevelCount;
  uint8_t  MasterDeepSleepControl;

  uint8_t  UvdLevelCount;
  uint8_t  VceLevelCount;
  uint8_t  AcpLevelCount;
  uint8_t  SamuLevelCount;

  // THERM_OUT pin configuration (see SMU7_THERM_OUT_MODE_* values).
  uint8_t  ThermOutGpio;
  uint8_t  ThermOutPolarity;
  uint8_t  ThermOutMode;
  uint8_t  BootPhases;
  uint32_t Reserved[4];

  // State table entries for each DPM state
  SMU73_Discrete_GraphicsLevel GraphicsLevel   [SMU73_MAX_LEVELS_GRAPHICS];
  SMU73_Discrete_MemoryLevel   MemoryACPILevel;
  SMU73_Discrete_MemoryLevel   MemoryLevel     [SMU73_MAX_LEVELS_MEMORY];
  SMU73_Discrete_LinkLevel     LinkLevel       [SMU73_MAX_LEVELS_LINK];
  SMU73_Discrete_ACPILevel     ACPILevel;
  SMU73_Discrete_UvdLevel      UvdLevel        [SMU73_MAX_LEVELS_UVD];
  SMU73_Discrete_ExtClkLevel   VceLevel        [SMU73_MAX_LEVELS_VCE];
  SMU73_Discrete_ExtClkLevel   AcpLevel        [SMU73_MAX_LEVELS_ACP];
  SMU73_Discrete_ExtClkLevel   SamuLevel       [SMU73_MAX_LEVELS_SAMU];
  SMU73_Discrete_Ulv           Ulv;

  uint32_t SclkStepSize;
  uint32_t Smio                  [SMU73_MAX_ENTRIES_SMIO];

  // Boot-time level index for each engine.
  uint8_t  UvdBootLevel;
  uint8_t  VceBootLevel;
  uint8_t  AcpBootLevel;
  uint8_t  SamuBootLevel;

  uint8_t  GraphicsBootLevel;
  uint8_t  GraphicsVoltageChangeEnable;
  uint8_t  GraphicsThermThrottleEnable;
  uint8_t  GraphicsInterval;

  uint8_t  VoltageInterval;
  uint8_t  ThermalInterval;
  uint16_t TemperatureLimitHigh;

  uint16_t TemperatureLimitLow;
  uint8_t  MemoryBootLevel;
  uint8_t  MemoryVoltageChangeEnable;

  uint16_t BootMVdd;
  uint8_t  MemoryInterval;
  uint8_t  MemoryThermThrottleEnable;

  uint16_t VoltageResponseTime;
  uint16_t PhaseResponseTime;

  uint8_t  PCIeBootLinkLevel;
  uint8_t  PCIeGenInterval;
  uint8_t  DTEInterval;           // DTE = digital temperature estimation
  uint8_t  DTEMode;

  // GPIO pin assignments for VR-hot / AC-DC / thermal signals.
  uint8_t  SVI2Enable;
  uint8_t  VRHotGpio;
  uint8_t  AcDcGpio;
  uint8_t  ThermGpio;

  uint16_t PPM_PkgPwrLimit;
  uint16_t PPM_TemperatureLimit;

  uint16_t DefaultTdp;
  uint16_t TargetTdp;

  uint16_t FpsHighThreshold;
  uint16_t FpsLowThreshold;

  // Per-sensor temperature limits (units per firmware convention — TODO confirm).
  uint16_t TemperatureLimitEdge;
  uint16_t TemperatureLimitHotspot;
  uint16_t TemperatureLimitLiquid1;
  uint16_t TemperatureLimitLiquid2;
  uint16_t TemperatureLimitVrVddc;
  uint16_t TemperatureLimitVrMvdd;
  uint16_t TemperatureLimitPlx;

  // Fan-controller gain per temperature sensor.
  uint16_t FanGainEdge;
  uint16_t FanGainHotspot;
  uint16_t FanGainLiquid;
  uint16_t FanGainVrVddc;
  uint16_t FanGainVrMvdd;
  uint16_t FanGainPlx;
  uint16_t FanGainHbm;

  // I2C addresses of external temperature/VR monitor devices.
  uint8_t  Liquid1_I2C_address;
  uint8_t  Liquid2_I2C_address;
  uint8_t  Vr_I2C_address;
  uint8_t  Plx_I2C_address;

  uint8_t  GeminiMode;            // dual-GPU (Gemini) board support
  uint8_t  spare17[3];
  uint32_t GeminiApertureHigh;
  uint32_t GeminiApertureLow;

  // I2C bus line (SCL/SDA) assignments for the devices above.
  uint8_t  Liquid_I2C_LineSCL;
  uint8_t  Liquid_I2C_LineSDA;
  uint8_t  Vr_I2C_LineSCL;
  uint8_t  Vr_I2C_LineSDA;
  uint8_t  Plx_I2C_LineSCL;
  uint8_t  Plx_I2C_LineSDA;

  uint8_t  spare1253[2];
  uint32_t spare123[2];

  uint8_t  DTEAmbientTempBase;
  uint8_t  DTETjOffset;
  uint8_t  GpuTjMax;
  uint8_t  GpuTjHyst;

  uint16_t BootVddc;
  uint16_t BootVddci;

  uint32_t BAPM_TEMP_GRADIENT;

  uint32_t LowSclkInterruptThreshold;
  uint32_t VddGfxReChkWait;

  // Clock stretcher enable/config; masterEn fields are per-level bitmasks.
  uint8_t  ClockStretcherAmount;
  uint8_t  Sclk_CKS_masterEn0_7;
  uint8_t  Sclk_CKS_masterEn8_15;
  uint8_t  DPMFreezeAndForced;

  uint8_t  Sclk_voltageOffset[8];

  SMU_ClockStretcherDataTable ClockStretcherDataTable;
  SMU_CKS_LOOKUPTable CKS_LOOKUPTable;
};

typedef struct SMU73_Discrete_DpmTable SMU73_Discrete_DpmTable;
368
369
370// --------------------------------------------------- Fan Table -----------------------------------------------------------
371struct SMU73_Discrete_FanTable
372{
373 uint16_t FdoMode;
374 int16_t TempMin;
375 int16_t TempMed;
376 int16_t TempMax;
377 int16_t Slope1;
378 int16_t Slope2;
379 int16_t FdoMin;
380 int16_t HystUp;
381 int16_t HystDown;
382 int16_t HystSlope;
383 int16_t TempRespLim;
384 int16_t TempCurr;
385 int16_t SlopeCurr;
386 int16_t PwmCurr;
387 uint32_t RefreshPeriod;
388 int16_t FdoMax;
389 uint8_t TempSrc;
390 int8_t Padding;
391};
392
393typedef struct SMU73_Discrete_FanTable SMU73_Discrete_FanTable;
394
395#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
396#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
397
398
399
// Runtime state of the MCLK DPM controller inside SMU firmware:
// PID/sigma-delta regulator state, current/target level, ACPI and
// VBlank-switch handshaking, and residency statistics.
// Packed firmware ABI; the function pointers are firmware-internal
// addresses and are meaningless to the driver.
struct SMU7_MclkDpmScoreboard
{

  uint32_t PercentageBusy;

  int32_t  PIDError;              // PID regulator state
  int32_t  PIDIntegral;
  int32_t  PIDOutput;

  uint32_t SigmaDeltaAccum;       // sigma-delta modulator state
  uint32_t SigmaDeltaOutput;
  uint32_t SigmaDeltaLevel;

  uint32_t UtilizationSetpoint;

  uint8_t  TdpClampMode;          // active throttle clamps
  uint8_t  TdcClampMode;
  uint8_t  ThermClampMode;
  uint8_t  VoltageBusy;

  int8_t   CurrLevel;             // current / target MCLK DPM level
  int8_t   TargLevel;
  uint8_t  LevelChangeInProgress;
  uint8_t  UpHyst;

  uint8_t  DownHyst;
  uint8_t  VoltageDownHyst;
  uint8_t  DpmEnable;
  uint8_t  DpmRunning;

  uint8_t  DpmForce;
  uint8_t  DpmForceLevel;
  uint8_t  DisplayWatermark;
  uint8_t  McArbIndex;

  uint32_t MinimumPerfMclk;

  uint8_t  AcpiReq;               // ACPI request/ack handshake
  uint8_t  AcpiAck;
  uint8_t  MclkSwitchInProgress;
  uint8_t  MclkSwitchCritical;

  uint8_t  IgnoreVBlank;
  uint8_t  TargetMclkIndex;
  uint8_t  TargetMvddIndex;
  uint8_t  MclkSwitchResult;

  uint16_t VbiFailureCount;       // VBlank-interval switch failure statistics
  uint8_t  VbiWaitCounter;
  uint8_t  EnabledLevelsChange;

  uint16_t LevelResidencyCounters [SMU73_MAX_LEVELS_MEMORY];
  uint16_t LevelSwitchCounters [SMU73_MAX_LEVELS_MEMORY];

  // Firmware-internal callbacks; opaque addresses from the driver's side.
  void     (*TargetStateCalculator)(uint8_t);
  void     (*SavedTargetStateCalculator)(uint8_t);

  uint16_t AutoDpmInterval;
  uint16_t AutoDpmRange;

  uint16_t VbiTimeoutCount;
  uint16_t MclkSwitchingTime;

  uint8_t  fastSwitch;
  uint8_t  Save_PIC_VDDGFX_EXIT;
  uint8_t  Save_PIC_VDDGFX_ENTER;
  uint8_t  padding;

};

typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
471
// ULV (ultra-low voltage) state machine flags and statistics.
// Packed firmware ABI.
struct SMU7_UlvScoreboard
{
  uint8_t  EnterUlv;
  uint8_t  ExitUlv;
  uint8_t  UlvActive;
  uint8_t  WaitingForUlv;
  uint8_t  UlvEnable;
  uint8_t  UlvRunning;
  uint8_t  UlvMasterEnable;
  uint8_t  padding;
  uint32_t UlvAbortedCount;
  uint32_t UlvTimeStamp;
};

typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;

// Register values saved across a VDDGFX power-gate cycle so firmware can
// restore them on exit.
struct VddgfxSavedRegisters
{
  uint32_t GPU_DBG[3];
  uint32_t MEC_BaseAddress_Hi;
  uint32_t MEC_BaseAddress_Lo;
  uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
  uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
  uint32_t CP_INT_CNTL;
};

typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
499
// VDDGFX power-gating state: flags, entry/abort counters and the register
// snapshot restored on exit. Packed firmware ABI.
struct SMU7_VddGfxScoreboard
{
  uint8_t  VddGfxEnable;
  uint8_t  VddGfxActive;
  uint8_t  VPUResetOccured;     // [sic] misspelling of "Occurred" — ABI name, must not be changed
  uint8_t  padding;

  uint32_t VddGfxEnteredCount;
  uint32_t VddGfxAbortedCount;

  uint32_t VddGfxVid;

  VddgfxSavedRegisters SavedRegisters;
};

typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;

// TDC (thermal design current) limit controller state: low-pass filtered
// current compared against limit/hysteresis. Packed firmware ABI.
struct SMU7_TdcLimitScoreboard {
  uint8_t  Enable;
  uint8_t  Running;
  uint16_t Alpha;               // filter coefficient — fixed-point format not visible here; TODO confirm
  uint32_t FilteredIddc;
  uint32_t IddcLimit;
  uint32_t IddcHyst;
  SMU7_HystController_Data HystControllerData;
};

typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
528
// Package power limit controller state: low-pass filtered package power
// compared against firmware and driver limits. Packed firmware ABI.
struct SMU7_PkgPwrLimitScoreboard {
  uint8_t  Enable;
  uint8_t  Running;
  uint16_t Alpha;               // filter coefficient — format not visible here; TODO confirm
  uint32_t FilteredPkgPwr;
  uint32_t Limit;
  uint32_t Hyst;
  uint32_t LimitFromDriver;
  SMU7_HystController_Data HystControllerData;
};

typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;

// BAPM (bidirectional application power management) thermal-model state:
// per-source power inputs, per-entity temperature estimates, and the
// thermal influence/node coefficient matrices. Packed firmware ABI.
struct SMU7_BapmScoreboard {
  uint32_t source_powers[SMU73_DTE_SOURCES];
  uint32_t source_powers_last[SMU73_DTE_SOURCES];
  int32_t  entity_temperatures[SMU73_NUM_GPU_TES];
  int32_t  initial_entity_temperatures[SMU73_NUM_GPU_TES];
  int32_t  Limit;
  int32_t  Hyst;
  int32_t  therm_influence_coeff_table[SMU73_DTE_ITERATIONS * SMU73_DTE_SOURCES * SMU73_DTE_SINKS * 2];
  int32_t  therm_node_table[SMU73_DTE_ITERATIONS * SMU73_DTE_SOURCES * SMU73_DTE_SINKS];
  uint16_t ConfigTDPPowerScalar;
  uint16_t FanSpeedPowerScalar;
  uint16_t OverDrivePowerScalar;
  uint16_t OverDriveLimitScalar;
  uint16_t FinalPowerScalar;
  uint8_t  VariantID;
  uint8_t  spare997;

  SMU7_HystController_Data HystControllerData;

  int32_t  temperature_gradient_slope;
  int32_t  temperature_gradient;
  uint32_t measured_temperature;
};


typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
568
// ACPI (D-state) transition bookkeeping: saved interrupt masks, the last
// request seen, and the D0 clock level to restore. Packed firmware ABI.
struct SMU7_AcpiScoreboard {
  uint32_t SavedInterruptMask[2];
  uint8_t  LastACPIRequest;
  uint8_t  CgBifResp;
  uint8_t  RequestType;
  uint8_t  Padding;
  SMU73_Discrete_ACPILevel D0Level;
};

typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;

// Quadratic fit coefficients used by AVFS: value = m1*x^2 + m2*x + b, with
// m1/m2 pre-scaled by 2^m1_shift / 2^m2_shift — presumably; verify against
// the AVFS evaluation code. Packed firmware ABI.
struct SMU_QuadraticCoeffs {
  int32_t m1;
  uint32_t b;

  int16_t m2;
  uint8_t m1_shift;
  uint8_t m2_shift;
};

typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
590
// Power-management fuse values the driver programs into firmware:
// per-level VIDs, load-line calibration, TDC limits, LPML scalers,
// fuzzy-fan tuning and the AVFS coefficient sets. Packed firmware ABI.
// NOTE(review): the /* dwN */ offset comments below look stale/inconsistent
// with the actual field sizes — treat them as historical, not authoritative.
struct SMU73_Discrete_PmFuses {
  /* dw0-dw1 */
  uint8_t BapmVddCVidHiSidd[8];

  /* dw2-dw3 */
  uint8_t BapmVddCVidLoSidd[8];

  /* dw4-dw5 */
  uint8_t VddCVid[8];

  /* dw1*/
  uint8_t SviLoadLineEn;          // SVI2 load-line compensation setup
  uint8_t SviLoadLineVddC;
  uint8_t SviLoadLineTrimVddC;
  uint8_t SviLoadLineOffsetVddC;

  /* dw2 */
  uint16_t TDC_VDDC_PkgLimit;
  uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
  uint8_t TDC_MAWt;

  /* dw3 */
  uint8_t TdcWaterfallCtl;
  uint8_t LPMLTemperatureMin;
  uint8_t LPMLTemperatureMax;
  uint8_t Reserved;

  /* dw4-dw7 */
  uint8_t LPMLTemperatureScaler[16];

  /* dw8-dw9 */
  int16_t FuzzyFan_ErrorSetDelta;     // fuzzy fan controller tuning deltas
  int16_t FuzzyFan_ErrorRateSetDelta;
  int16_t FuzzyFan_PwmSetDelta;
  uint16_t Reserved6;

  /* dw10-dw14 */
  uint8_t GnbLPML[16];

  /* dw15 */
  uint8_t GnbLPMLMaxVid;
  uint8_t GnbLPMLMinVid;
  uint8_t Reserved1[2];

  /* dw16 */
  uint16_t BapmVddCBaseLeakageHiSidd;
  uint16_t BapmVddCBaseLeakageLoSidd;

  /* AVFS */
  uint16_t VFT_Temp[3];               // calibration temperatures for the VFT_ATE fits
  uint16_t padding;

  SMU_QuadraticCoeffs VFT_ATE[3];     // per-temperature ATE characterization fits

  SMU_QuadraticCoeffs AVFS_GB;        // AVFS guard-band / BTC correction fits
  SMU_QuadraticCoeffs ATE_ACBTC_GB;

  SMU_QuadraticCoeffs P2V;            // PSM-count to voltage conversion fit

  uint32_t PsmCharzFreq;              // PSM characterization point (frequency/temperature)

  uint16_t InversionVoltage;
  uint16_t PsmCharzTemp;

  uint32_t EnabledAvfsModules;        // bitmask of enabled AVFS sub-modules
};

typedef struct SMU73_Discrete_PmFuses SMU73_Discrete_PmFuses;
659
// Header describing the DRAM logging buffer layout (see the DRAM_LOG_*
// soft registers). Packed firmware ABI.
struct SMU7_Discrete_Log_Header_Table {
  uint32_t    version;
  uint32_t    asic_id;
  uint16_t    flags;
  uint16_t    entry_size;       // size of one log entry, bytes — presumably; verify against log writer
  uint32_t    total_size;
  uint32_t    num_of_entries;
  uint8_t     type;
  uint8_t     mode;
  uint8_t     filler_0[2];
  uint32_t    filler_1[2];
};

typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;

// Control block for firmware logging: enable/type plus the buffer address
// (hi/lo halves) and accounting counters. Packed firmware ABI.
struct SMU7_Discrete_Log_Cntl {
    uint8_t             Enabled;
    uint8_t             Type;
    uint8_t             padding[2];
    uint32_t            BufferSize;
    uint32_t            SamplesLogged;
    uint32_t            SampleSize;
    uint32_t            AddrL;
    uint32_t            AddrH;
};

typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
687
// Number of CAC network-weight accumulator signals collected per sample.
#define CAC_ACC_NW_NUM_OF_SIGNALS 87

// One CAC sample: temperature plus the raw per-signal accumulators.
struct SMU7_Discrete_Cac_Collection_Table {
  uint32_t temperature;
  uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
};

typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;

// CAC verification breakdown: per-rail power decomposed into leakage,
// constant and per-engine dynamic components. Packed firmware ABI.
struct SMU7_Discrete_Cac_Verification_Table {
  uint32_t VddcTotalPower;
  uint32_t VddcLeakagePower;
  uint32_t VddcConstantPower;
  uint32_t VddcGfxDynamicPower;
  uint32_t VddcUvdDynamicPower;
  uint32_t VddcVceDynamicPower;
  uint32_t VddcAcpDynamicPower;
  uint32_t VddcPcieDynamicPower;
  uint32_t VddcDceDynamicPower;
  uint32_t VddcCurrent;
  uint32_t VddcVoltage;
  uint32_t VddciTotalPower;
  uint32_t VddciLeakagePower;
  uint32_t VddciConstantPower;
  uint32_t VddciDynamicPower;
  uint32_t Vddr1TotalPower;
  uint32_t Vddr1LeakagePower;
  uint32_t Vddr1ConstantPower;
  uint32_t Vddr1DynamicPower;
  uint32_t spare[4];
  uint32_t temperature;
};

typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
722
// Accumulated power-management telemetry exported by firmware.
// Naming convention: *_max = peak value, *_acc = running accumulator;
// AccCnt is the sample count for averaging the accumulators.
// Packed firmware ABI.
struct SMU7_Discrete_Pm_Status_Table {
  //Thermal entities
  int32_t   T_meas_max[SMU73_THERMAL_INPUT_LOOP_COUNT];
  int32_t   T_meas_acc[SMU73_THERMAL_INPUT_LOOP_COUNT];
  int32_t   T_meas_acc_cnt[SMU73_THERMAL_INPUT_LOOP_COUNT];
  uint32_t  T_hbm_acc;

  //Voltage domains
  uint32_t  I_calc_max;
  uint32_t  I_calc_acc;
  uint32_t  P_meas_acc;
  uint32_t  V_meas_load_acc;
  uint32_t  I_meas_acc;
  uint32_t  P_meas_acc_vddci;
  uint32_t  V_meas_load_acc_vddci;
  uint32_t  I_meas_acc_vddci;

  //Frequency
  uint16_t  Sclk_dpm_residency[8];    // time spent in each DPM level
  uint16_t  Uvd_dpm_residency[8];
  uint16_t  Vce_dpm_residency[8];

  //Chip
  uint32_t  P_roc_acc;
  uint32_t  PkgPwr_max;
  uint32_t  PkgPwr_acc;
  uint32_t  MclkSwitchingTime_max;
  uint32_t  MclkSwitchingTime_acc;
  uint32_t  FanPwm_acc;
  uint32_t  FanRpm_acc;
  uint32_t  Gfx_busy_acc;
  uint32_t  Mc_busy_acc;
  uint32_t  Fps_acc;

  uint32_t  AccCnt;                   // number of samples folded into the *_acc fields
};

typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
761
//FIXME THESE NEED TO BE UPDATED
// Per-clock CAC (capacitance) coefficients and fixed-point constants used
// by the firmware power model. Values are hardware-characterization data;
// units/scaling are defined by firmware (the *_SHIFT defines give the
// fixed-point positions).
#define SMU7_SCLK_CAC 0x561
#define SMU7_MCLK_CAC 0xF9
#define SMU7_VCLK_CAC 0x2DE
#define SMU7_DCLK_CAC 0x2DE
#define SMU7_ECLK_CAC 0x25E
#define SMU7_ACLK_CAC 0x25E
#define SMU7_SAMCLK_CAC 0x25E
#define SMU7_DISPCLK_CAC 0x100
#define SMU7_CAC_CONSTANT 0x2EE3430
#define SMU7_CAC_CONSTANT_SHIFT 18

// VDDCI rail power-model constants.
#define SMU7_VDDCI_MCLK_CONST        1765
#define SMU7_VDDCI_MCLK_CONST_SHIFT  16
#define SMU7_VDDCI_VDDCI_CONST       50958
#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
#define SMU7_VDDCI_CONST             11781
#define SMU7_VDDCI_STROBE_PWR        1331

// VDDR1 (memory) rail power-model constants.
#define SMU7_VDDR1_CONST            693
#define SMU7_VDDR1_CAC_WEIGHT       20
#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
#define SMU7_VDDR1_STROBE_PWR       512

// Per-engine area coefficients for the power model.
#define SMU7_AREA_COEFF_UVD 0xA78
#define SMU7_AREA_COEFF_VCE 0x190A
#define SMU7_AREA_COEFF_ACP 0x22D1
#define SMU7_AREA_COEFF_SAMU 0x534

//ThermOutMode values (SMU73_Discrete_DpmTable.ThermOutMode)
#define SMU7_THERM_OUT_MODE_DISABLE       0x0
#define SMU7_THERM_OUT_MODE_THERM_ONLY    0x1
#define SMU7_THERM_OUT_MODE_THERM_VRHOT   0x2
795
796#pragma pack(pop)
797
798#endif
799
diff --git a/drivers/gpu/drm/amd/amdgpu/smu7_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
index 0b0b404ff091..0b0b404ff091 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu7_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
diff --git a/drivers/gpu/drm/amd/amdgpu/smu7_fusion.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h
index 78ada9ffd508..78ada9ffd508 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu7_fusion.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h
diff --git a/drivers/gpu/drm/amd/amdgpu/smu8.h b/drivers/gpu/drm/amd/powerplay/inc/smu8.h
index d758d07b6a31..d758d07b6a31 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu8.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu8.h
diff --git a/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h b/drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h
index 5c9cc3c0bbfa..0c37c94e9414 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h
@@ -48,6 +48,14 @@ struct SMU8_Port80MonitorTable {
48 uint8_t EnableDramShadow; 48 uint8_t EnableDramShadow;
49}; 49};
50 50
51/* Display specific power management parameters */
52#define PWRMGT_SEPARATION_TIME_SHIFT 0
53#define PWRMGT_SEPARATION_TIME_MASK 0xFFFF
54#define PWRMGT_DISABLE_CPU_CSTATES_SHIFT 16
55#define PWRMGT_DISABLE_CPU_CSTATES_MASK 0x1
56#define PWRMGT_DISABLE_CPU_PSTATES_SHIFT 24
57#define PWRMGT_DISABLE_CPU_PSTATES_MASK 0x1
58
51/* Clock Table Definitions */ 59/* Clock Table Definitions */
52#define NUM_SCLK_LEVELS 8 60#define NUM_SCLK_LEVELS 8
53#define NUM_LCLK_LEVELS 8 61#define NUM_LCLK_LEVELS 8
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
index f8ba071f39c8..f8ba071f39c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
new file mode 100644
index 000000000000..c24a81eebc7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU_UCODE_XFER_VI_H
25#define SMU_UCODE_XFER_VI_H
26
27#define SMU_DRAMData_TOC_VERSION 1
28#define MAX_IH_REGISTER_COUNT 65535
29#define SMU_DIGEST_SIZE_BYTES 20
30#define SMU_FB_SIZE_BYTES 1048576
31#define SMU_MAX_ENTRIES 12
32
33#define UCODE_ID_SMU 0
34#define UCODE_ID_SDMA0 1
35#define UCODE_ID_SDMA1 2
36#define UCODE_ID_CP_CE 3
37#define UCODE_ID_CP_PFP 4
38#define UCODE_ID_CP_ME 5
39#define UCODE_ID_CP_MEC 6
40#define UCODE_ID_CP_MEC_JT1 7
41#define UCODE_ID_CP_MEC_JT2 8
42#define UCODE_ID_GMCON_RENG 9
43#define UCODE_ID_RLC_G 10
44#define UCODE_ID_IH_REG_RESTORE 11
45#define UCODE_ID_VBIOS 12
46#define UCODE_ID_MISC_METADATA 13
47#define UCODE_ID_RLC_SCRATCH 32
48#define UCODE_ID_RLC_SRM_ARAM 33
49#define UCODE_ID_RLC_SRM_DRAM 34
50#define UCODE_ID_MEC_STORAGE 35
51#define UCODE_ID_VBIOS_PARAMETERS 36
52#define UCODE_META_DATA 0xFF
53
54#define UCODE_ID_SMU_MASK 0x00000001
55#define UCODE_ID_SDMA0_MASK 0x00000002
56#define UCODE_ID_SDMA1_MASK 0x00000004
57#define UCODE_ID_CP_CE_MASK 0x00000008
58#define UCODE_ID_CP_PFP_MASK 0x00000010
59#define UCODE_ID_CP_ME_MASK 0x00000020
60#define UCODE_ID_CP_MEC_MASK 0x00000040
61#define UCODE_ID_CP_MEC_JT1_MASK 0x00000080
62#define UCODE_ID_CP_MEC_JT2_MASK 0x00000100
63#define UCODE_ID_GMCON_RENG_MASK 0x00000200
64#define UCODE_ID_RLC_G_MASK 0x00000400
65#define UCODE_ID_IH_REG_RESTORE_MASK 0x00000800
66#define UCODE_ID_VBIOS_MASK 0x00001000
67
68#define UCODE_FLAG_UNHALT_MASK 0x1
69
70struct SMU_Entry {
71#ifndef __BIG_ENDIAN
72 uint16_t id;
73 uint16_t version;
74 uint32_t image_addr_high;
75 uint32_t image_addr_low;
76 uint32_t meta_data_addr_high;
77 uint32_t meta_data_addr_low;
78 uint32_t data_size_byte;
79 uint16_t flags;
80 uint16_t num_register_entries;
81#else
82 uint16_t version;
83 uint16_t id;
84 uint32_t image_addr_high;
85 uint32_t image_addr_low;
86 uint32_t meta_data_addr_high;
87 uint32_t meta_data_addr_low;
88 uint32_t data_size_byte;
89 uint16_t num_register_entries;
90 uint16_t flags;
91#endif
92};
93
94struct SMU_DRAMData_TOC {
95 uint32_t structure_version;
96 uint32_t num_entries;
97 struct SMU_Entry entry[SMU_MAX_ENTRIES];
98};
99
100#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
new file mode 100644
index 000000000000..504f035d1843
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _SMUMGR_H_
24#define _SMUMGR_H_
25#include <linux/types.h>
26#include "pp_instance.h"
27#include "amd_powerplay.h"
28
29struct pp_smumgr;
30struct pp_instance;
31
32#define smu_lower_32_bits(n) ((uint32_t)(n))
33#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
34
35struct pp_smumgr_func {
36 int (*smu_init)(struct pp_smumgr *smumgr);
37 int (*smu_fini)(struct pp_smumgr *smumgr);
38 int (*start_smu)(struct pp_smumgr *smumgr);
39 int (*check_fw_load_finish)(struct pp_smumgr *smumgr,
40 uint32_t firmware);
41 int (*request_smu_load_fw)(struct pp_smumgr *smumgr);
42 int (*request_smu_load_specific_fw)(struct pp_smumgr *smumgr,
43 uint32_t firmware);
44 int (*get_argument)(struct pp_smumgr *smumgr);
45 int (*send_msg_to_smc)(struct pp_smumgr *smumgr, uint16_t msg);
46 int (*send_msg_to_smc_with_parameter)(struct pp_smumgr *smumgr,
47 uint16_t msg, uint32_t parameter);
48 int (*download_pptable_settings)(struct pp_smumgr *smumgr,
49 void **table);
50 int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
51};
52
53struct pp_smumgr {
54 uint32_t chip_family;
55 uint32_t chip_id;
56 uint32_t hw_revision;
57 void *device;
58 void *backend;
59 uint32_t usec_timeout;
60 bool reload_fw;
61 const struct pp_smumgr_func *smumgr_funcs;
62};
63
64
65extern int smum_init(struct amd_pp_init *pp_init,
66 struct pp_instance *handle);
67
68extern int smum_fini(struct pp_smumgr *smumgr);
69
70extern int smum_get_argument(struct pp_smumgr *smumgr);
71
72extern int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table);
73
74extern int smum_upload_powerplay_table(struct pp_smumgr *smumgr);
75
76extern int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
77
78extern int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
79 uint16_t msg, uint32_t parameter);
80
81extern int smum_wait_on_register(struct pp_smumgr *smumgr,
82 uint32_t index, uint32_t value, uint32_t mask);
83
84extern int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
85 uint32_t index, uint32_t value, uint32_t mask);
86
87extern int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
88 uint32_t indirect_port, uint32_t index,
89 uint32_t value, uint32_t mask);
90
91
92extern void smum_wait_for_indirect_register_unequal(
93 struct pp_smumgr *smumgr,
94 uint32_t indirect_port, uint32_t index,
95 uint32_t value, uint32_t mask);
96
97extern int smu_allocate_memory(void *device, uint32_t size,
98 enum cgs_gpu_mem_type type,
99 uint32_t byte_align, uint64_t *mc_addr,
100 void **kptr, void *handle);
101
102extern int smu_free_memory(void *device, void *handle);
103
104#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
105
106#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
107
108#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
109 port, index, value, mask) \
110 smum_wait_on_indirect_register(smumgr, \
111 mm##port##_INDEX, index, value, mask)
112
113
114#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
115 index, value, mask) \
116 smum_wait_for_register_unequal(smumgr, \
117 index, value, mask)
118
119#define SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, value, mask) \
120 SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
121 mm##reg, value, mask)
122
123#define SMUM_WAIT_FIELD_UNEQUAL(smumgr, reg, field, fieldval) \
124 SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, \
125 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
126 SMUM_FIELD_MASK(reg, field))
127
128#define SMUM_GET_FIELD(value, reg, field) \
129 (((value) & SMUM_FIELD_MASK(reg, field)) \
130 >> SMUM_FIELD_SHIFT(reg, field))
131
132#define SMUM_READ_FIELD(device, reg, field) \
133 SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
134
135#define SMUM_SET_FIELD(value, reg, field, field_val) \
136 (((value) & ~SMUM_FIELD_MASK(reg, field)) | \
137 (SMUM_FIELD_MASK(reg, field) & ((field_val) << \
138 SMUM_FIELD_SHIFT(reg, field))))
139
140#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
141 port, index, value, mask) \
142 smum_wait_on_indirect_register(smumgr, \
143 mm##port##_INDEX_0, index, value, mask)
144
145#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
146 port, index, value, mask) \
147 smum_wait_for_indirect_register_unequal(smumgr, \
148 mm##port##_INDEX_0, index, value, mask)
149
150
151#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
152 SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
153
154#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
155 SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
156
157
158/*Operations on named fields.*/
159
160#define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
161 SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
162 reg, field)
163
164#define SMUM_WRITE_FIELD(device, reg, field, fieldval) \
165 cgs_write_register(device, mm##reg, \
166 SMUM_SET_FIELD(cgs_read_register(device, mm##reg), reg, field, fieldval))
167
168#define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
169 cgs_write_ind_register(device, port, ix##reg, \
170 SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
171 reg, field, fieldval))
172
173#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
174 SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
175 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
176 SMUM_FIELD_MASK(reg, field))
177
178#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
179 SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
180 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
181 SMUM_FIELD_MASK(reg, field))
182#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h
new file mode 100644
index 000000000000..63631296d751
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h
@@ -0,0 +1,420 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_PP_SMC_H
25#define TONGA_PP_SMC_H
26
27#pragma pack(push, 1)
28
29#define PPSMC_SWSTATE_FLAG_DC 0x01
30#define PPSMC_SWSTATE_FLAG_UVD 0x02
31#define PPSMC_SWSTATE_FLAG_VCE 0x04
32#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08
33
34#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
35#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
36#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
37
38#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
39#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
40#define PPSMC_SYSTEMFLAG_GDDR5 0x04
41
42#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
43
44#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
45#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
46#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40
47
48
49#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
50#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
51
52#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
53#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
54
55#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10
56#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20
57#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40
58
59/* Defines for DPM 2.0 */
60#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
61#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
62#define PPSMC_DPM2FLAGS_OCP 0x04
63
64/* Defines for display watermark level */
65
66#define PPSMC_DISPLAY_WATERMARK_LOW 0
67#define PPSMC_DISPLAY_WATERMARK_HIGH 1
68
69/* In the HW performance level's state flags:*/
70#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
71#define PPSMC_STATEFLAG_POWERBOOST 0x02
72#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
73#define PPSMC_STATEFLAG_POWERSHIFT 0x08
74#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
75#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
76#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
77
78/* Fan control algorithm:*/
79#define FDO_MODE_HARDWARE 0
80#define FDO_MODE_PIECE_WISE_LINEAR 1
81
82enum FAN_CONTROL {
83 FAN_CONTROL_FUZZY,
84 FAN_CONTROL_TABLE
85};
86
87/* Return codes for driver to SMC communication.*/
88
89#define PPSMC_Result_OK ((uint16_t)0x01)
90#define PPSMC_Result_NoMore ((uint16_t)0x02)
91#define PPSMC_Result_NotNow ((uint16_t)0x03)
92
93#define PPSMC_Result_Failed ((uint16_t)0xFF)
94#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
95#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
96
97typedef uint16_t PPSMC_Result;
98
99#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
100
101
102#define PPSMC_MSG_Halt ((uint16_t)0x10)
103#define PPSMC_MSG_Resume ((uint16_t)0x11)
104#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
105#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
106#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
107#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
108#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
109#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
110#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
111#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
112#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
113#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
114
115#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
116#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
117#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
118#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
119#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
120
121#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
122#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
123#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
124#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
125#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
126#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
127#define PPSMC_CACHistoryStart ((uint16_t)0x57)
128#define PPSMC_CACHistoryStop ((uint16_t)0x58)
129#define PPSMC_TDPClampingActive ((uint16_t)0x59)
130#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
131#define PPSMC_StartFanControl ((uint16_t)0x5B)
132#define PPSMC_StopFanControl ((uint16_t)0x5C)
133#define PPSMC_NoDisplay ((uint16_t)0x5D)
134#define PPSMC_HasDisplay ((uint16_t)0x5E)
135#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
136#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
137#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
138#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
139#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
140#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
141#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
142#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
143#define PPSMC_OCPActive ((uint16_t)0x6C)
144#define PPSMC_OCPInactive ((uint16_t)0x6D)
145#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
146#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
147#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
148#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
149#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
150#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
151#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
152#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
153#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
154#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
155#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
156#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
157#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
158#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
159#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
160#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
161
162#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
163#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
164#define PPSMC_FlushDataCache ((uint16_t)0x80)
165#define PPSMC_FlushInstrCache ((uint16_t)0x81)
166
167#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
168#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
169
170#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
171
172#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
173#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
174#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
175#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
176
177#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
178#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90)
179#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91)
180
181#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92)
182#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93)
183
184#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94)
185#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95)
186#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96)
187#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97)
188#define PPSMC_MSG_GPIO17 ((uint16_t)0x98)
189
190#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99)
191#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A)
192#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B)
193#define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C)
194#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D)
195#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E)
196
197#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
198
199/* Trinity Specific Messages*/
200#define PPSMC_MSG_Test ((uint16_t) 0x100)
201#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
202#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
203#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
204#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
205#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
206#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
207#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
208#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
209#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
210#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
211#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
212#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
213#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
214#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
215#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
216#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
217#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
218#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
219#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
220#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
221#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
222#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
223#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
224#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
225#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
226#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
227#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
228#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
229#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
230#define PPSMC_MSG_PCIE_PHYPowerDown ((uint16_t) 0x122)
231#define PPSMC_MSG_PCIE_PHYPowerUp ((uint16_t) 0x123)
232#define PPSMC_MSG_UVD_DPM_Config ((uint16_t) 0x124)
233#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
234#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
235#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
236#define PPSMC_MSG_NBDPM_Config ((uint16_t) 0x125)
237#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint16_t) 0x126)
238#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint16_t) 0x127)
239#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
240
241#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
242#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
243#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
244#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
245#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
246#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
247#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
248#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
249#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
250#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
251#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
252#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
253#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
254#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
255#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
256#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
257#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
258#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
259#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
260#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
261#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
262#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
263#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
264#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
265#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
266#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
267#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
268#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
269#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
270#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
271#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
272#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
273#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
274#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
275#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
276
277#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
278#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
279
280#define PPSMC_MSG_DPM_Enable ((uint16_t)0x14e)
281#define PPSMC_MSG_DPM_Disable ((uint16_t)0x14f)
282#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t)0x150)
283#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t)0x151)
284#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t)0x152)
285#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t)0x153)
286#define PPSMC_MSG_UVDDPM_Enable ((uint16_t)0x154)
287#define PPSMC_MSG_UVDDPM_Disable ((uint16_t)0x155)
288#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t)0x156)
289#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t)0x157)
290#define PPSMC_MSG_ACPDPM_Enable ((uint16_t)0x158)
291#define PPSMC_MSG_ACPDPM_Disable ((uint16_t)0x159)
292#define PPSMC_MSG_VCEDPM_Enable ((uint16_t)0x15a)
293#define PPSMC_MSG_VCEDPM_Disable ((uint16_t)0x15b)
294#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t)0x15c)
295
296#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
297#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
298#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
299#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
300#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
301#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
302#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
303#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
304#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
305#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
306#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
307#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
308#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
309#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
310#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
311#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t)0x16c)
312#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t)0x16d)
313#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t)0x16e)
314#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t)0x16f)
315#define PPSMC_MSG_PmStatusLogStart ((uint16_t)0x170)
316#define PPSMC_MSG_PmStatusLogSample ((uint16_t)0x171)
317#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
318#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
319#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
320#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
321#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
322#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
323#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
324#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
325#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
326#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
327#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
328#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
329#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
330#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
331#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
332#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
333#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
334#define PPSMC_MSG_UVD_HANDSHAKE_OFF ((uint16_t) 0x183)
335#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
336#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
337#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
338#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
339#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
340#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
341#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
342#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
343#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
344#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
345#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
346#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
347#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
348#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
349#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
350#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
351#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
352#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
353#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
354#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
355#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
356#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
357#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
358#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
359#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
360
361#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
362#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
363#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
364#define PPSMC_MSG_Enable_PCC ((uint16_t) 0x19E)
365#define PPSMC_MSG_Disable_PCC ((uint16_t) 0x19F)
366
367#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
368#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
369#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
370#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
371#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
372#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
373#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
374#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
375#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
376
377#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
378#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
379#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
380#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
381#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
382#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
383#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
384
385#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
386#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
387#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
388#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
389#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
390#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
391#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
392#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
393#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
394#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
395#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
396#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
397#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
398#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
399#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
400#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
401#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
402#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
403#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
404#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
405#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
406#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
407#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
408#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
409
410typedef uint16_t PPSMC_Msg;
411
412/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/
413#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
414#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
415#define PPSMC_EVENT_STATUS_DC 0x00000004
416#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
417
418
419#pragma pack(pop)
420#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
new file mode 100644
index 000000000000..6c4ef135cf01
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the 'smu manager' sub-component of powerplay.
3# It provides the smu management services for the driver.
4
5SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o
6
7AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
8
9AMD_POWERPLAY_FILES += $(AMD_PP_SMUMGR)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
new file mode 100644
index 000000000000..873a8d264d5c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -0,0 +1,858 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include <linux/gfp.h>
27#include "linux/delay.h"
28#include "cgs_common.h"
29#include "smu/smu_8_0_d.h"
30#include "smu/smu_8_0_sh_mask.h"
31#include "smu8.h"
32#include "smu8_fusion.h"
33#include "cz_smumgr.h"
34#include "cz_ppsmc.h"
35#include "smu_ucode_xfer_cz.h"
36#include "gca/gfx_8_0_d.h"
37#include "gca/gfx_8_0_sh_mask.h"
38#include "smumgr.h"
39
#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32) /* round x up to a multiple of 32 bytes */
41
/* Firmwares fetched through CGS and handed to the SMU, in load order. */
static enum cz_scratch_entry firmware_list[] = {
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
52
53static int cz_smum_get_argument(struct pp_smumgr *smumgr)
54{
55 if (smumgr == NULL || smumgr->device == NULL)
56 return -EINVAL;
57
58 return cgs_read_register(smumgr->device,
59 mmSMU_MP1_SRBM2P_ARG_0);
60}
61
62static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
63 uint16_t msg)
64{
65 int result = 0;
66
67 if (smumgr == NULL || smumgr->device == NULL)
68 return -EINVAL;
69
70 result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
71 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
72 if (result != 0) {
73 printk(KERN_ERR "[ powerplay ] cz_send_msg_to_smc_async failed\n");
74 return result;
75 }
76
77 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
78 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
79
80 return 0;
81}
82
83/* Send a message to the SMC, and wait for its response.*/
84static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
85{
86 int result = 0;
87
88 result = cz_send_msg_to_smc_async(smumgr, msg);
89 if (result != 0)
90 return result;
91
92 result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
93 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
94
95 if (result != 0)
96 return result;
97
98 return 0;
99}
100
101static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
102 uint32_t smc_address, uint32_t limit)
103{
104 if (smumgr == NULL || smumgr->device == NULL)
105 return -EINVAL;
106
107 if (0 != (3 & smc_address)) {
108 printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
109 return -1;
110 }
111
112 if (limit <= (smc_address + 3)) {
113 printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
114 return -1;
115 }
116
117 cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
118 SMN_MP1_SRAM_START_ADDR + smc_address);
119
120 return 0;
121}
122
123static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
124 uint32_t smc_address, uint32_t value, uint32_t limit)
125{
126 int result;
127
128 if (smumgr == NULL || smumgr->device == NULL)
129 return -EINVAL;
130
131 result = cz_set_smc_sram_address(smumgr, smc_address, limit);
132 cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
133
134 return 0;
135}
136
137static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
138 uint16_t msg, uint32_t parameter)
139{
140 if (smumgr == NULL || smumgr->device == NULL)
141 return -EINVAL;
142
143 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
144
145 return cz_send_msg_to_smc(smumgr, msg);
146}
147
/*
 * Ask the SMU to (re)load all firmwares described by the TOC buffer.
 * Clears the UcodeLoadStatus word, hands the TOC's GPU address to the
 * SMU, then kicks off the InitJobs/ARAM/power-profiling/initialize
 * jobs.  Only the status of the final ExecuteJob message is returned;
 * intermediate message failures are not checked.
 */
static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
	int result = 0;
	uint32_t smc_address;

	/* reload_fw gates whether a firmware reload is actually needed. */
	if (!smumgr->reload_fw) {
		printk(KERN_INFO "[ powerplay ] skip reloading...\n");
		return 0;
	}

	/* Zero UcodeLoadStatus so completion can later be polled. */
	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);

	/* Tell the SMU where the TOC lives in GPU memory (hi/lo halves). */
	cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_DriverDramAddrHi,
					cz_smu->toc_buffer.mc_addr_high);

	cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_DriverDramAddrLo,
					cz_smu->toc_buffer.mc_addr_low);

	cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);

	/* Execute the jobs recorded in the TOC by cz_smu_construct_toc(). */
	cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_ExecuteJob,
					cz_smu->toc_entry_aram);
	cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
				cz_smu->toc_entry_power_profiling_index);

	result = cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_ExecuteJob,
					cz_smu->toc_entry_initialize_index);

	return result;
}
186
187static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
188 uint32_t firmware)
189{
190 int i;
191 uint32_t index = SMN_MP1_SRAM_START_ADDR +
192 SMU8_FIRMWARE_HEADER_LOCATION +
193 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
194
195 if (smumgr == NULL || smumgr->device == NULL)
196 return -EINVAL;
197
198 return cgs_read_register(smumgr->device,
199 mmSMU_MP1_SRBM2P_ARG_0);
200
201 cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
202
203 for (i = 0; i < smumgr->usec_timeout; i++) {
204 if (firmware ==
205 (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
206 break;
207 udelay(1);
208 }
209
210 if (i >= smumgr->usec_timeout) {
211 printk(KERN_ERR "[ powerplay ] SMU check loaded firmware failed.\n");
212 return -EINVAL;
213 }
214
215 return 0;
216}
217
/*
 * Halt both MEC micro engines and point the compute instruction cache
 * at the MEC firmware image obtained through the CGS interface.
 */
static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
{
	uint32_t reg_data;
	uint32_t tmp;
	int ret = 0;
	struct cgs_firmware_info info = {0};
	struct cz_smumgr *cz_smu;

	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	/* NOTE(review): cz_smu is assigned but not otherwise used here. */
	cz_smu = (struct cz_smumgr *)smumgr->backend;
	ret = cgs_get_firmware_info(smumgr->device,
						CGS_UCODE_ID_CP_MEC, &info);

	if (ret)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = cgs_read_register(smumgr->device,
					mmCP_MEC_CNTL);
	tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);

	/* Instruction cache base control: VMID 0, ATC off, cache policy 0,
	 * MTYPE 1. */
	tmp = cgs_read_register(smumgr->device,
					mmCP_CPC_IC_BASE_CNTL);

	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);

	/* Program the firmware's GPU address, masked to the register fields. */
	reg_data = smu_lower_32_bits(info.mc_addr) &
			SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = smu_upper_32_bits(info.mc_addr) &
			SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
262
263static int cz_start_smu(struct pp_smumgr *smumgr)
264{
265 int ret = 0;
266 uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
267 UCODE_ID_SDMA0_MASK |
268 UCODE_ID_SDMA1_MASK |
269 UCODE_ID_CP_CE_MASK |
270 UCODE_ID_CP_ME_MASK |
271 UCODE_ID_CP_PFP_MASK |
272 UCODE_ID_CP_MEC_JT1_MASK |
273 UCODE_ID_CP_MEC_JT2_MASK;
274
275 cz_request_smu_load_fw(smumgr);
276 cz_check_fw_load_finish(smumgr, fw_to_check);
277
278 ret = cz_load_mec_firmware(smumgr);
279 if (ret)
280 printk(KERN_ERR "[ powerplay ] Mec Firmware load failed\n");
281
282 return ret;
283}
284
/*
 * Map a driver-side cz_scratch_entry onto the task argument value the
 * SMU firmware expects (UCODE_ID_* / TASK_ARG_* from smu_ucode_xfer_cz.h).
 * All DATA_ID_* entries collapse to TASK_ARG_REG_MMIO.
 */
static uint8_t cz_translate_firmware_enum_to_arg(
			enum cz_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		ret = UCODE_ID_SDMA1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		ret = UCODE_ID_CP_MEC_JT2;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	/* All register-restore data blobs share the MMIO task argument. */
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}
351
352static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
353{
354 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
355
356 switch (fw_type) {
357 case UCODE_ID_SDMA0:
358 result = CGS_UCODE_ID_SDMA0;
359 break;
360 case UCODE_ID_SDMA1:
361 result = CGS_UCODE_ID_SDMA1;
362 break;
363 case UCODE_ID_CP_CE:
364 result = CGS_UCODE_ID_CP_CE;
365 break;
366 case UCODE_ID_CP_PFP:
367 result = CGS_UCODE_ID_CP_PFP;
368 break;
369 case UCODE_ID_CP_ME:
370 result = CGS_UCODE_ID_CP_ME;
371 break;
372 case UCODE_ID_CP_MEC_JT1:
373 result = CGS_UCODE_ID_CP_MEC_JT1;
374 break;
375 case UCODE_ID_CP_MEC_JT2:
376 result = CGS_UCODE_ID_CP_MEC_JT2;
377 break;
378 case UCODE_ID_RLC_G:
379 result = CGS_UCODE_ID_RLC_G;
380 break;
381 default:
382 break;
383 }
384
385 return result;
386}
387
/*
 * Append one scratch-buffer task of @type to the TOC, chaining it to
 * the next slot unless @is_last.  The scratch buffer backing @fw_enum
 * must already have been registered via
 * cz_smu_populate_single_scratch_entry().
 *
 * NOTE(review): the TOC slot is consumed and type/arg/next are written
 * before the fw_enum lookup is validated, so a failed lookup leaves a
 * partially initialized task behind — confirm this is acceptable.
 */
static int cz_smu_populate_single_scratch_task(
			struct pp_smumgr *smumgr,
			enum cz_scratch_entry fw_enum,
			uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;

	/* Find the scratch buffer registered for this firmware id. */
	for (i = 0; i < cz_smu->scratch_buffer_length; i++)
		if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= cz_smu->scratch_buffer_length) {
		printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low;
	task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high;
	task->size_bytes = cz_smu->scratch_buffer[i].data_size;

	/* IH register restore data carries an embedded command header. */
	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
424
/*
 * Append one TASK_TYPE_UCODE_LOAD task to the TOC for @fw_enum,
 * chaining it to the next slot unless @is_last.  The firmware must
 * already be present in driver_buffer (see
 * cz_smu_populate_firmware_entries()).
 *
 * NOTE(review): like the scratch variant, the TOC slot is consumed
 * before the lookup is validated.
 */
static int cz_smu_populate_single_ucode_load_task(
			struct pp_smumgr *smumgr,
			enum cz_scratch_entry fw_enum,
			bool is_last)
{
	uint8_t i;
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;

	/* Locate the firmware image registered for this id. */
	for (i = 0; i < cz_smu->driver_buffer_length; i++)
		if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= cz_smu->driver_buffer_length) {
		printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = cz_smu->driver_buffer[i].mc_addr_low;
	task->addr.high = cz_smu->driver_buffer[i].mc_addr_high;
	task->size_bytes = cz_smu->driver_buffer[i].data_size;

	return 0;
}
454
455static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
456{
457 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
458
459 cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
460 cz_smu_populate_single_scratch_task(smumgr,
461 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
462 TASK_TYPE_UCODE_SAVE, true);
463
464 return 0;
465}
466
467static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
468{
469 int i;
470 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
471 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
472
473 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
474 toc->JobList[i] = (uint8_t)IGNORE_JOB;
475
476 return 0;
477}
478
479static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
480{
481 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
482 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
483
484 toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
485 cz_smu_populate_single_scratch_task(smumgr,
486 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
487 TASK_TYPE_UCODE_SAVE, false);
488
489 cz_smu_populate_single_scratch_task(smumgr,
490 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
491 TASK_TYPE_UCODE_SAVE, true);
492
493 return 0;
494}
495
496
497static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
498{
499 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
500 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
501
502 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
503
504 cz_smu_populate_single_ucode_load_task(smumgr,
505 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
506 cz_smu_populate_single_ucode_load_task(smumgr,
507 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
508 cz_smu_populate_single_ucode_load_task(smumgr,
509 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
510 cz_smu_populate_single_ucode_load_task(smumgr,
511 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
512 cz_smu_populate_single_ucode_load_task(smumgr,
513 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
514 cz_smu_populate_single_ucode_load_task(smumgr,
515 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
516
517 /* populate scratch */
518 cz_smu_populate_single_scratch_task(smumgr,
519 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
520 TASK_TYPE_UCODE_LOAD, false);
521
522 cz_smu_populate_single_scratch_task(smumgr,
523 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
524 TASK_TYPE_UCODE_LOAD, false);
525
526 cz_smu_populate_single_scratch_task(smumgr,
527 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
528 TASK_TYPE_UCODE_LOAD, true);
529
530 return 0;
531}
532
533static int cz_smu_construct_toc_for_power_profiling(
534 struct pp_smumgr *smumgr)
535{
536 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
537
538 cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
539
540 cz_smu_populate_single_scratch_task(smumgr,
541 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
542 TASK_TYPE_INITIALIZE, true);
543 return 0;
544}
545
546static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
547{
548 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
549
550 cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
551
552 cz_smu_populate_single_ucode_load_task(smumgr,
553 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
554 cz_smu_populate_single_ucode_load_task(smumgr,
555 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
556 cz_smu_populate_single_ucode_load_task(smumgr,
557 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
558 cz_smu_populate_single_ucode_load_task(smumgr,
559 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
560 cz_smu_populate_single_ucode_load_task(smumgr,
561 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
562 cz_smu_populate_single_ucode_load_task(smumgr,
563 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
564 cz_smu_populate_single_ucode_load_task(smumgr,
565 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
566 cz_smu_populate_single_ucode_load_task(smumgr,
567 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
568
569 return 0;
570}
571
572static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
573{
574 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
575
576 cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
577
578 cz_smu_populate_single_scratch_task(smumgr,
579 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
580 TASK_TYPE_INITIALIZE, true);
581
582 return 0;
583}
584
585static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
586{
587 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
588
589 cz_smu->toc_entry_used_count = 0;
590
591 cz_smu_initialize_toc_empty_job_list(smumgr);
592
593 cz_smu_construct_toc_for_rlc_aram_save(smumgr);
594
595 cz_smu_construct_toc_for_vddgfx_enter(smumgr);
596
597 cz_smu_construct_toc_for_vddgfx_exit(smumgr);
598
599 cz_smu_construct_toc_for_power_profiling(smumgr);
600
601 cz_smu_construct_toc_for_bootup(smumgr);
602
603 cz_smu_construct_toc_for_clock_table(smumgr);
604
605 return 0;
606}
607
608static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
609{
610 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
611 uint32_t firmware_type;
612 uint32_t i;
613 int ret;
614 enum cgs_ucode_id ucode_id;
615 struct cgs_firmware_info info = {0};
616
617 cz_smu->driver_buffer_length = 0;
618
619 for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
620
621 firmware_type = cz_translate_firmware_enum_to_arg(
622 firmware_list[i]);
623
624 ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
625
626 ret = cgs_get_firmware_info(smumgr->device,
627 ucode_id, &info);
628
629 if (ret == 0) {
630 cz_smu->driver_buffer[i].mc_addr_high =
631 smu_upper_32_bits(info.mc_addr);
632
633 cz_smu->driver_buffer[i].mc_addr_low =
634 smu_lower_32_bits(info.mc_addr);
635
636 cz_smu->driver_buffer[i].data_size = info.image_size;
637
638 cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
639 cz_smu->driver_buffer_length++;
640 }
641 }
642
643 return 0;
644}
645
646static int cz_smu_populate_single_scratch_entry(
647 struct pp_smumgr *smumgr,
648 enum cz_scratch_entry scratch_type,
649 uint32_t ulsize_byte,
650 struct cz_buffer_entry *entry)
651{
652 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
653 long long mc_addr =
654 ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
655 | cz_smu->smu_buffer.mc_addr_low;
656
657 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
658
659 mc_addr += cz_smu->smu_buffer_used_bytes;
660
661 entry->data_size = ulsize_byte;
662 entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
663 cz_smu->smu_buffer_used_bytes;
664 entry->mc_addr_low = smu_lower_32_bits(mc_addr);
665 entry->mc_addr_high = smu_upper_32_bits(mc_addr);
666 entry->firmware_ID = scratch_type;
667
668 cz_smu->smu_buffer_used_bytes += ulsize_aligned;
669
670 return 0;
671}
672
673static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
674{
675 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
676 unsigned long i;
677
678 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
679 if (cz_smu->scratch_buffer[i].firmware_ID
680 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
681 break;
682 }
683
684 *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
685
686 cz_send_msg_to_smc_with_parameter(smumgr,
687 PPSMC_MSG_SetClkTableAddrHi,
688 cz_smu->scratch_buffer[i].mc_addr_high);
689
690 cz_send_msg_to_smc_with_parameter(smumgr,
691 PPSMC_MSG_SetClkTableAddrLo,
692 cz_smu->scratch_buffer[i].mc_addr_low);
693
694 cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
695 cz_smu->toc_entry_clock_table);
696
697 cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
698
699 return 0;
700}
701
702static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
703{
704 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
705 unsigned long i;
706
707 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
708 if (cz_smu->scratch_buffer[i].firmware_ID
709 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
710 break;
711 }
712
713 cz_send_msg_to_smc_with_parameter(smumgr,
714 PPSMC_MSG_SetClkTableAddrHi,
715 cz_smu->scratch_buffer[i].mc_addr_high);
716
717 cz_send_msg_to_smc_with_parameter(smumgr,
718 PPSMC_MSG_SetClkTableAddrLo,
719 cz_smu->scratch_buffer[i].mc_addr_low);
720
721 cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
722 cz_smu->toc_entry_clock_table);
723
724 cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
725
726 return 0;
727}
728
/*
 * Allocate the GART-backed TOC and scratch buffers, register the
 * individual scratch sub-buffers and build the TOC.
 *
 * NOTE(review): error paths leak — if the second smu_allocate_memory()
 * or any scratch-entry population fails, the buffers already allocated
 * are not freed here; whether cz_smu_fini() is still invoked on init
 * failure should be confirmed before restructuring with goto cleanup.
 */
static int cz_smu_init(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	uint64_t mc_addr = 0;
	int ret = 0;

	/* One page for the TOC; scratch sized for all save/restore areas,
	 * each rounded up to 32 bytes (see SIZE_ALIGN_32). */
	cz_smu->toc_buffer.data_size = 4096;
	cz_smu->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	ret = smu_allocate_memory(smumgr->device,
		cz_smu->toc_buffer.data_size,
		CGS_GPU_MEM_TYPE__GART_CACHEABLE,
		PAGE_SIZE,
		&mc_addr,
		&cz_smu->toc_buffer.kaddr,
		&cz_smu->toc_buffer.handle);
	if (ret != 0)
		return -1;

	cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	ret = smu_allocate_memory(smumgr->device,
		cz_smu->smu_buffer.data_size,
		CGS_GPU_MEM_TYPE__GART_CACHEABLE,
		PAGE_SIZE,
		&mc_addr,
		&cz_smu->smu_buffer.kaddr,
		&cz_smu->smu_buffer.handle);
	if (ret != 0)
		return -1;

	cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	/* Register each scratch sub-buffer; order determines their layout
	 * within smu_buffer and must match the data_size sum above. */
	cz_smu_populate_firmware_entries(smumgr);
	if (0 != cz_smu_populate_single_scratch_entry(smumgr,
		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
		UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
		printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
		return -1;
	}

	if (0 != cz_smu_populate_single_scratch_entry(smumgr,
		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
		UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
		printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
		return -1;
	}
	if (0 != cz_smu_populate_single_scratch_entry(smumgr,
		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
		UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
		printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
		return -1;
	}

	if (0 != cz_smu_populate_single_scratch_entry(smumgr,
		CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
		sizeof(struct SMU8_MultimediaPowerLogData),
		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
		printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
		return -1;
	}

	if (0 != cz_smu_populate_single_scratch_entry(smumgr,
		CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
		sizeof(struct SMU8_Fusion_ClkTable),
		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
		printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
		return -1;
	}
	cz_smu_construct_toc(smumgr);

	return 0;
}
812
/*
 * Tear down the CZ SMU backend: free the TOC and scratch GPU buffers
 * and the backend structure itself.
 *
 * NOTE(review): this also kfree()s the pp_smumgr that owns the backend
 * — confirm no caller touches smumgr after smu_fini returns.
 */
static int cz_smu_fini(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu;

	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	cz_smu = (struct cz_smumgr *)smumgr->backend;
	if (cz_smu) {
		cgs_free_gpu_mem(smumgr->device,
				cz_smu->toc_buffer.handle);
		cgs_free_gpu_mem(smumgr->device,
				cz_smu->smu_buffer.handle);
		kfree(cz_smu);
		kfree(smumgr);
	}

	return 0;
}
832
/* SMU manager callbacks for Carrizo.  request_smu_load_fw and
 * request_smu_load_specific_fw are unused: firmware loading is driven
 * entirely from cz_start_smu(). */
static const struct pp_smumgr_func cz_smu_funcs = {
	.smu_init = cz_smu_init,
	.smu_fini = cz_smu_fini,
	.start_smu = cz_start_smu,
	.check_fw_load_finish = cz_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = cz_smum_get_argument,
	.send_msg_to_smc = cz_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
	.download_pptable_settings = cz_download_pptable_settings,
	.upload_pptable_settings = cz_upload_pptable_settings,
};
846
847int cz_smum_init(struct pp_smumgr *smumgr)
848{
849 struct cz_smumgr *cz_smu;
850
851 cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
852 if (cz_smu == NULL)
853 return -ENOMEM;
854
855 smumgr->backend = cz_smu;
856 smumgr->smumgr_funcs = &cz_smu_funcs;
857 return 0;
858}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
new file mode 100644
index 000000000000..883818039248
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _CZ_SMUMGR_H_
24#define _CZ_SMUMGR_H_
25
26
#define MAX_NUM_FIRMWARE 8	/* capacity of driver_buffer/meta_data_buffer */
#define MAX_NUM_SCRATCH 11	/* capacity of scratch_buffer */
#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024
#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4)
33
/* Identifiers for every firmware image and save/restore data blob the
 * CZ SMU manager tracks; translated to SMU task arguments by
 * cz_translate_firmware_enum_to_arg(). */
enum cz_scratch_entry {
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
	CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
	CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
	CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
	CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
};
58
/* One tracked buffer: its size, split GPU (MC) address, CPU mapping,
 * owning firmware/data id and the allocation handle used to free it. */
struct cz_buffer_entry {
	uint32_t data_size;
	uint32_t mc_addr_low;
	uint32_t mc_addr_high;
	void *kaddr;
	enum cz_scratch_entry firmware_ID;
	unsigned long handle; /* as bo handle used when release bo */
};
67
/* One MMIO register offset/value pair for register-restore metadata. */
struct cz_register_index_data_pair {
	uint32_t offset;
	uint32_t value;
};
72
/* IH register-restore blob: a command word followed by a variable
 * number of register/value pairs (declared with length 1). */
struct cz_ih_meta_data {
	uint32_t command;
	struct cz_register_index_data_pair register_index_value_pair[1];
};
77
/* Per-device state of the Carrizo SMU manager backend. */
struct cz_smumgr {
	uint8_t driver_buffer_length;	/* valid entries in driver_buffer */
	uint8_t scratch_buffer_length;	/* valid entries in scratch_buffer */
	uint16_t toc_entry_used_count;	/* next free TOC task slot */
	/* TOC indices of the individual jobs, recorded while building. */
	uint16_t toc_entry_initialize_index;
	uint16_t toc_entry_power_profiling_index;
	uint16_t toc_entry_aram;
	uint16_t toc_entry_ih_register_restore_task_index;
	uint16_t toc_entry_clock_table;
	uint16_t ih_register_restore_task_size;
	uint16_t smu_buffer_used_bytes;	/* allocation cursor into smu_buffer */

	struct cz_buffer_entry toc_buffer;	/* SMU-visible table of contents */
	struct cz_buffer_entry smu_buffer;	/* backing store for scratch areas */
	struct cz_buffer_entry firmware_buffer;
	struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
	struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
	struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
};
97
98struct pp_smumgr;
99
100extern int cz_smum_init(struct pp_smumgr *smumgr);
101
102#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
new file mode 100644
index 000000000000..cdbb9f89bf36
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -0,0 +1,1042 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "smumgr.h"
25#include "smu73.h"
26#include "smu_ucode_xfer_vi.h"
27#include "fiji_smumgr.h"
28#include "fiji_ppsmc.h"
29#include "smu73_discrete.h"
30#include "ppatomctrl.h"
31#include "smu/smu_7_1_3_d.h"
32#include "smu/smu_7_1_3_sh_mask.h"
33#include "gmc/gmc_8_1_d.h"
34#include "gmc/gmc_8_1_sh_mask.h"
35#include "oss/oss_3_0_d.h"
36#include "gca/gfx_8_0_d.h"
37#include "bif/bif_5_0_d.h"
38#include "bif/bif_5_0_sh_mask.h"
39#include "pp_debug.h"
40#include "fiji_pwrvirus.h"
41
42#define AVFS_EN_MSB 1568
43#define AVFS_EN_LSB 1568
44
45#define FIJI_SMC_SIZE 0x20000
46
/* Hard-coded graphics DPM levels used only for the AVFS BTC (boot-time
 * calibration) run; copied verbatim into the SMC DPM table by
 * fiji_setup_graphics_level_structure().  The field meanings follow the
 * two comment rows below, matching struct SMU73_Discrete_GraphicsLevel.
 * NOTE(review): values are opaque calibration constants — do not edit.
 */
struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
		/*  Min        Sclk       pcie     DeepSleep Activity  CgSpll      CgSpll    spllSpread  SpllSpread  CcPwr  CcPwr  Sclk  Display     Enabled     Enabled   Voltage    Power */
		/* Voltage, Frequency, DpmLevel,  DivId,    Level,  FuncCntl3,  FuncCntl4,  Spectrum,   Spectrum2,  DynRm, DynRm1  Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
		{ 0x3c0fd047, 0x30750000,   0x00,     0x03,   0x1e00, 0x00200410, 0x87020000, 0x21680000, 0x0c000000,   0,     0,   0x16, 0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
		{ 0xa00fd047, 0x409c0000,   0x01,     0x04,   0x1e00, 0x00800510, 0x87020000, 0x21680000, 0x11000000,   0,     0,   0x16, 0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
		{ 0x0410d047, 0x50c30000,   0x01,     0x00,   0x1e00, 0x00600410, 0x87020000, 0x21680000, 0x0d000000,   0,     0,   0x0e, 0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
		{ 0x6810d047, 0x60ea0000,   0x01,     0x00,   0x1e00, 0x00800410, 0x87020000, 0x21680000, 0x0e000000,   0,     0,   0x0c, 0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
		{ 0xcc10d047, 0xe8fd0000,   0x01,     0x00,   0x1e00, 0x00e00410, 0x87020000, 0x21680000, 0x0f000000,   0,     0,   0x0c, 0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
		{ 0x3011d047, 0x70110100,   0x01,     0x00,   0x1e00, 0x00400510, 0x87020000, 0x21680000, 0x10000000,   0,     0,   0x0c, 0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
		{ 0x9411d047, 0xf8240100,   0x01,     0x00,   0x1e00, 0x00a00510, 0x87020000, 0x21680000, 0x11000000,   0,     0,   0x0c, 0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
		{ 0xf811d047, 0x80380100,   0x01,     0x00,   0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000,   0,     0,   0x0c, 0x01,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 }
};
59
60static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type)
61{
62 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
63
64 switch (fw_type) {
65 case UCODE_ID_SMU:
66 result = CGS_UCODE_ID_SMU;
67 break;
68 case UCODE_ID_SDMA0:
69 result = CGS_UCODE_ID_SDMA0;
70 break;
71 case UCODE_ID_SDMA1:
72 result = CGS_UCODE_ID_SDMA1;
73 break;
74 case UCODE_ID_CP_CE:
75 result = CGS_UCODE_ID_CP_CE;
76 break;
77 case UCODE_ID_CP_PFP:
78 result = CGS_UCODE_ID_CP_PFP;
79 break;
80 case UCODE_ID_CP_ME:
81 result = CGS_UCODE_ID_CP_ME;
82 break;
83 case UCODE_ID_CP_MEC:
84 result = CGS_UCODE_ID_CP_MEC;
85 break;
86 case UCODE_ID_CP_MEC_JT1:
87 result = CGS_UCODE_ID_CP_MEC_JT1;
88 break;
89 case UCODE_ID_CP_MEC_JT2:
90 result = CGS_UCODE_ID_CP_MEC_JT2;
91 break;
92 case UCODE_ID_RLC_G:
93 result = CGS_UCODE_ID_RLC_G;
94 break;
95 default:
96 break;
97 }
98
99 return result;
100}
/**
* Set the address for reading/writing the SMC SRAM space.
* @param smumgr    the address of the powerplay hardware manager.
* @param smc_addr  the address in the SMC RAM to access; must be 4-byte aligned.
* @param limit     first address past the valid SMC RAM region.
* @return 0 on success, -EINVAL if the address is misaligned or out of range.
*/
static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr,
		uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
			"SMC address must be 4 byte aligned.", return -EINVAL;);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
			"SMC address is beyond the SMC RAM area.", return -EINVAL;);

	/* Latch the target address and disable auto-increment so subsequent
	 * DATA_0 accesses hit exactly this word.
	 */
	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr);
	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);

	return 0;
}
119
120/**
121* Copy bytes from an array into the SMC RAM space.
122*
123* @param smumgr the address of the powerplay SMU manager.
124* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
125* @param src the byte array to copy the bytes from.
126* @param byteCount the number of bytes to copy.
127*/
128int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
129 uint32_t smcStartAddress, const uint8_t *src,
130 uint32_t byteCount, uint32_t limit)
131{
132 int result;
133 uint32_t data, originalData;
134 uint32_t addr, extraShift;
135
136 PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
137 "SMC address must be 4 byte aligned.", return -EINVAL;);
138 PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
139 "SMC address is beyond the SMC RAM area.", return -EINVAL;);
140
141 addr = smcStartAddress;
142
143 while (byteCount >= 4) {
144 /* Bytes are written into the SMC addres space with the MSB first. */
145 data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
146
147 result = fiji_set_smc_sram_address(smumgr, addr, limit);
148 if (result)
149 return result;
150
151 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
152
153 src += 4;
154 byteCount -= 4;
155 addr += 4;
156 }
157
158 if (byteCount) {
159 /* Now write the odd bytes left.
160 * Do a read modify write cycle.
161 */
162 data = 0;
163
164 result = fiji_set_smc_sram_address(smumgr, addr, limit);
165 if (result)
166 return result;
167
168 originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
169 extraShift = 8 * (4 - byteCount);
170
171 while (byteCount > 0) {
172 /* Bytes are written into the SMC addres
173 * space with the MSB first.
174 */
175 data = (0x100 * data) + *src++;
176 byteCount--;
177 }
178 data <<= extraShift;
179 data |= (originalData & ~((~0UL) << extraShift));
180
181 result = fiji_set_smc_sram_address(smumgr, addr, limit);
182 if (!result)
183 return result;
184
185 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
186 }
187 return 0;
188}
189
/* Patch the SMC reset vector with a jump instruction so the SMC starts
 * executing at 0x0, and propagate any copy failure (the original
 * unconditionally returned 0, hiding errors from callers).
 */
int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
{
	static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };

	return fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
}
198
/**
* Return if the SMC is currently running.
*
* The SMC counts as running when its clock is not disabled AND its program
* counter is past the boot region (>= 0x20100).
*
* @param smumgr the address of the powerplay hardware manager.
*/
bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
{
	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
			CGS_IND_REG__SMC,
			SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
			&& (0x20100 <= cgs_read_ind_register(smumgr->device,
					CGS_IND_REG__SMC, ixSMC_PC_C)));
}
212
/**
* Send a message to the SMC, and wait for its response.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return 0 on success, -1 if the SMC is not running.
*/
int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
	if (!fiji_is_smc_ram_running(smumgr))
		return -1;

	/* A zero SMC_RESP means a previous message is still in flight;
	 * report it and wait for that message to drain first.
	 */
	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
		printk(KERN_ERR "Failed to send Previous Message.");
		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
	}

	/* Post the message, then block until the SMC acknowledges it. */
	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	return 0;
}
235
/**
 * Send a message to the SMC with parameter
 * @param smumgr: the address of the powerplay hardware manager.
 * @param msg: the message to send.
 * @param parameter: the parameter to send
 * @return 0 on success, -1 if the SMC is not running.
 */
int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
		uint16_t msg, uint32_t parameter)
{
	if (!fiji_is_smc_ram_running(smumgr))
		return -1;

	/* Drain any still-pending previous message before posting ours. */
	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
		printk(KERN_ERR "Failed to send Previous Message.");
		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
	}

	/* The argument register must be written before the message register:
	 * writing MESSAGE_0 is what triggers the SMC.
	 */
	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	return 0;
}
260
261
/**
* Send a message to the SMC with parameter, do not wait for response
*
* Unlike fiji_send_msg_to_smc_with_parameter(), this neither verifies that
* the SMC is running nor waits for the response after posting.
*
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return always 0.
*/
int fiji_send_msg_to_smc_with_parameter_without_waiting(
		struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
	/* Drain any still-pending previous message before posting ours. */
	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
		printk(KERN_ERR "Failed to send Previous Message.");
		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
	}
	/* Argument first; writing MESSAGE_0 triggers the SMC. */
	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);

	return 0;
}
282
283/**
284* Uploads the SMU firmware from .hex file
285*
286* @param smumgr the address of the powerplay SMU manager.
287* @return 0 or -1.
288*/
289
290static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr)
291{
292 const uint8_t *src;
293 uint32_t byte_count;
294 uint32_t *data;
295 struct cgs_firmware_info info = {0};
296
297 cgs_get_firmware_info(smumgr->device,
298 fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
299
300 if (info.image_size & 3) {
301 printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n");
302 return -EINVAL;
303 }
304
305 if (info.image_size > FIJI_SMC_SIZE) {
306 printk(KERN_ERR "SMC address is beyond the SMC RAM area\n");
307 return -EINVAL;
308 }
309
310 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
311 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
312
313 byte_count = info.image_size;
314 src = (const uint8_t *)info.kptr;
315
316 data = (uint32_t *)src;
317 for (; byte_count >= 4; data++, byte_count -= 4)
318 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
319
320 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
321 return 0;
322}
323
/**
* Read a 32bit value from the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smc_addr the address in the SMC RAM to access.
* @param value an output parameter for the data read from the SMC SRAM.
* @param limit first address past the valid SMC RAM region.
* @return 0 on success, negative error code if the address is invalid.
*/
int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
		uint32_t *value, uint32_t limit)
{
	int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);

	if (result)
		return result;

	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
	return 0;
}
342
/**
* Write a 32bit value to the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smc_addr the address in the SMC RAM to access.
* @param value to write to the SMC SRAM.
* @param limit first address past the valid SMC RAM region.
* @return 0 on success, negative error code if the address is invalid.
*/
int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
		uint32_t value, uint32_t limit)
{
	int result;

	result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);

	if (result)
		return result;

	cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
	return 0;
}
363
364static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type)
365{
366 uint32_t result = 0;
367
368 switch (fw_type) {
369 case UCODE_ID_SDMA0:
370 result = UCODE_ID_SDMA0_MASK;
371 break;
372 case UCODE_ID_SDMA1:
373 result = UCODE_ID_SDMA1_MASK;
374 break;
375 case UCODE_ID_CP_CE:
376 result = UCODE_ID_CP_CE_MASK;
377 break;
378 case UCODE_ID_CP_PFP:
379 result = UCODE_ID_CP_PFP_MASK;
380 break;
381 case UCODE_ID_CP_ME:
382 result = UCODE_ID_CP_ME_MASK;
383 break;
384 case UCODE_ID_CP_MEC_JT1:
385 result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
386 break;
387 case UCODE_ID_CP_MEC_JT2:
388 result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK;
389 break;
390 case UCODE_ID_RLC_G:
391 result = UCODE_ID_RLC_G_MASK;
392 break;
393 default:
394 printk(KERN_ERR "UCode type is out of range!");
395 result = 0;
396 }
397
398 return result;
399}
400
401/* Populate one firmware image to the data structure */
402static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr,
403 uint32_t fw_type, struct SMU_Entry *entry)
404{
405 int result;
406 struct cgs_firmware_info info = {0};
407
408 result = cgs_get_firmware_info(
409 smumgr->device,
410 fiji_convert_fw_type_to_cgs(fw_type),
411 &info);
412
413 if (!result) {
414 entry->version = 0;
415 entry->id = (uint16_t)fw_type;
416 entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
417 entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
418 entry->meta_data_addr_high = 0;
419 entry->meta_data_addr_low = 0;
420 entry->data_size_byte = info.image_size;
421 entry->num_register_entries = 0;
422
423 if (fw_type == UCODE_ID_RLC_G)
424 entry->flags = 1;
425 else
426 entry->flags = 0;
427 }
428
429 return result;
430}
431
/* Build the firmware table of contents in the shared header buffer, hand
 * its GPU address to the SMC, and ask the SMC to load all microcode images
 * in one shot.
 */
static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr)
{
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
	uint32_t fw_to_load;
	struct SMU_DRAMData_TOC *toc;

	/* Reset the per-ucode load-status word so completion can be polled
	 * later (see fiji_check_fw_load_finish).
	 */
	if (priv->soft_regs_start)
		cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
				priv->soft_regs_start +
				offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
				0x0);

	toc = (struct SMU_DRAMData_TOC *)priv->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	/* Append one TOC entry per firmware image; bail on the first failure. */
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );
	PP_ASSERT_WITH_CODE(
		0 == fiji_populate_single_firmware_entry(smumgr,
				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n" , return -1 );

	/* Tell the SMC where the TOC lives in GPU memory. */
	fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
			priv->header_buffer.mc_addr_high);
	fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO,
			priv->header_buffer.mc_addr_low);

	/* All images in the TOC above; masks are summed into one request. */
	fw_to_load = UCODE_ID_RLC_G_MASK
			+ UCODE_ID_SDMA0_MASK
			+ UCODE_ID_SDMA1_MASK
			+ UCODE_ID_CP_CE_MASK
			+ UCODE_ID_CP_ME_MASK
			+ UCODE_ID_CP_PFP_MASK
			+ UCODE_ID_CP_MEC_MASK
			+ UCODE_ID_CP_MEC_JT1_MASK
			+ UCODE_ID_CP_MEC_JT2_MASK;

	if (fiji_send_msg_to_smc_with_parameter(smumgr,
			PPSMC_MSG_LoadUcodes, fw_to_load))
		printk(KERN_ERR "Fail to Request SMU Load uCode");

	return 0;
}
506
507
/* Check if the FW has been loaded, SMU will not return
 * if loading has not finished.
 *
 * Polls the per-ucode bit in the soft-register UcodeLoadStatus word until
 * the mask for the requested firmware type is set.
 */
static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr,
		uint32_t fw_type)
{
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
	uint32_t mask = fiji_get_mask_for_firmware_type(fw_type);

	/* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
	if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
			priv->soft_regs_start +
			offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
			mask, mask)) {
		printk(KERN_ERR "check firmware loading failed\n");
		return -EINVAL;
	}
	return 0;
}
527
528
/* Reload all firmware by restarting the SMU through the registered
 * start_smu hook (which re-runs the full load sequence).
 */
static int fiji_reload_firmware(struct pp_smumgr *smumgr)
{
	return smumgr->smumgr_funcs->start_smu(smumgr);
}
533
534static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr)
535{
536 uint32_t value;
537
538 value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER);
539 if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) {
540 /* driver reads on SR-IOV enabled PF: 0x80000000
541 * driver reads on SR-IOV enabled VF: 0x80000001
542 * driver reads on SR-IOV disabled: 0x00000000
543 */
544 return true;
545 }
546 return false;
547}
548
/* Ask the SMU to load one specific firmware image.  Only needed under
 * SR-IOV; otherwise this is a no-op.
 */
static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type)
{
	if (fiji_is_hw_virtualization_enabled(smumgr)) {
		uint32_t masks = fiji_get_mask_for_firmware_type(fw_type);
		/* Fire-and-forget: no response wait under virtualization. */
		if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr,
				PPSMC_MSG_LoadUcodes, masks))
			printk(KERN_ERR "Fail to Request SMU Load uCode");
	}
	/* For non-virtualization cases,
	 * SMU loads all FWs at once in fiji_request_smu_load_fw.
	 */
	return 0;
}
562
/* Bring up the SMC when it runs in protected mode: upload the firmware
 * while the core is held in reset, let the ROM auto-start it, then verify
 * the pass/fail status before declaring the SMU alive.
 */
static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
{
	int result = 0;

	/* Wait for smc boot up */
	/* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
		RCU_UC_EVENTS, boot_seq_done, 0); */

	/* Hold the SMC in reset while the image is uploaded. */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMC_SYSCON_RESET_CNTL, rst_reg, 1);

	result = fiji_upload_smu_firmware_image(smumgr);
	if (result)
		return result;

	/* Clear status */
	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
			ixSMU_STATUS, 0);

	/* Enable the SMC clock. */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/* De-assert reset */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* Wait for ROM firmware to initialize interrupt handler */
	/*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
		SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */

	/* Set SMU Auto Start */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMU_INPUT_DATA, AUTO_START, 1);

	/* Clear firmware interrupt enable flag */
	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
			ixFIRMWARE_FLAGS, 0);

	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS,
			INTERRUPTS_ENABLED, 1);

	/* Post a test message (arg 0x20000) and wait for the response to
	 * confirm the message path works.
	 */
	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	/* Wait for done bit to be set */
	SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
			SMU_STATUS, SMU_DONE, 0);

	/* Check pass/failed indicator */
	if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMU_STATUS, SMU_PASS)) {
		PP_ASSERT_WITH_CODE(false,
				"SMU Firmware start failed!", return -1);
	}

	/* Wait for firmware to initialize */
	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
			FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);

	return result;
}
625
/* Bring up the SMC when it is NOT protected: upload the firmware under
 * reset, patch the reset vector to jump to 0x0, then release reset and
 * wait for the firmware's interrupts-enabled flag.
 */
static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
{
	int result = 0;

	/* wait for smc boot up */
	SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
			RCU_UC_EVENTS, boot_seq_done, 0);

	/* Clear firmware interrupt enable flag */
	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
			ixFIRMWARE_FLAGS, 0);

	/* Assert reset */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMC_SYSCON_RESET_CNTL, rst_reg, 1);

	result = fiji_upload_smu_firmware_image(smumgr);
	if (result)
		return result;

	/* Set smc instruct start point at 0x0 */
	fiji_program_jump_on_start(smumgr);

	/* Enable clock */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/* De-assert reset */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
			SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* Wait for firmware to initialize */
	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
			FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);

	return result;
}
663
/* Replay the power-virus command table (register writes that force a
 * worst-case power workload, used for AVFS BTC).  Walks all
 * PWR_VIRUS_TABLE_SIZE entries; success requires hitting a PwrCmdEnd
 * entry, which flips the status to AVFS_BTC_VIRUS_LOADED and result to 0.
 */
int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
	int i, result = -1;
	uint32_t reg, data;
	PWR_Command_Table *virus = PwrVirusTable;
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);

	priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS;
	for (i = 0; (i < PWR_VIRUS_TABLE_SIZE); i++) {
		switch (virus->command) {
		case PwrCmdWrite:
			reg  = virus->reg;
			data = virus->data;
			cgs_write_register(smumgr->device, reg, data);
			break;
		case PwrCmdEnd:
			/* NOTE(review): the loop keeps iterating past the End
			 * marker; entries after it would hit the default arm
			 * and overwrite result — assumes End is the last
			 * entry of the table.
			 */
			priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_LOADED;
			result = 0;
			break;
		default:
			printk(KERN_ERR "Table Exit with Invalid Command!");
			priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
			result = -1;
			break;
		}
		virus++;
	}
	return result;
}
693
/* Run the AVFS boot-time calibration: ask the SMC to perform BTC, enable
 * AVFS if it succeeded, then soft-reset the graphics engine so clean
 * microcode can be loaded afterwards (the reset runs regardless of the
 * BTC outcome).
 */
static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
{
	int result = 0;
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);

	priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
	/* A zero AvfsBtcParam means BTC is disabled — skip straight to the
	 * engine reset below.
	 */
	if (priv->avfs.AvfsBtcParam) {
		if (!fiji_send_msg_to_smc_with_parameter(smumgr,
				PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
			if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
				priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
				result = 0;
			} else {
				printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
						" to Enable AVFS Failed!");
				fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
				result = -1;
			}
		} else {
			printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] "
					"PerformBTC SMU msg failed");
			result = -1;
		}
	}
	/* Soft-Reset to reset the engine before loading uCode */
	/* halt */
	cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
	/* reset everything */
	cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
	/* clear reset */
	cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);

	return result;
}
728
729int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
730{
731 int result = 0;
732 uint32_t table_start;
733 uint32_t charz_freq_addr, inversion_voltage_addr, charz_freq;
734 uint16_t inversion_voltage;
735
736 charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
737 inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
738
739 PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
740 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
741 PmFuseTable), &table_start, 0x40000),
742 "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
743 "starting address of PmFuse structure",
744 return -1;);
745
746 charz_freq_addr = table_start +
747 offsetof(struct SMU73_Discrete_PmFuses, PsmCharzFreq);
748 inversion_voltage_addr = table_start +
749 offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
750
751 result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr,
752 (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
753 PP_ASSERT_WITH_CODE(0 == result,
754 "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
755 "be populated.", return -1;);
756
757 result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
758 (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
759 PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
760 "charz_freq could not be populated.", return -1;);
761
762 return result;
763}
764
/* Copy the hard-coded AVFS graphics DPM levels (avfs_graphics_level) and a
 * fixed VRConfig into the SMC's DPM table so the BTC run has a known
 * voltage/frequency ladder to calibrate against.
 */
int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
	int32_t vr_config;
	uint32_t table_start;
	uint32_t level_addr, vr_config_addr;
	uint32_t level_size = sizeof(avfs_graphics_level);

	PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, DpmTable),
			&table_start, 0x40000),
			"[AVFS][Fiji_SetupGfxLvlStruct] SMU could not "
			"communicate starting address of DPM table",
			return -1;);

	/* Default value for vr_config =
	 * VR_MERGED_WITH_VDDC + VR_STATIC_VOLTAGE(VDDCI) */
	vr_config = 0x01000500; /* Real value:0x50001 */

	vr_config_addr = table_start +
			offsetof(SMU73_Discrete_DpmTable, VRConfig);

	PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr,
			(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
			"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
			"vr_config value over to SMC",
			return -1;);

	level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);

	PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr,
			(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
			"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
			return -1;);

	return 0;
}
802
803/* Work in Progress */
804int fiji_restore_vft_table(struct pp_smumgr *smumgr)
805{
806 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
807
808 if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
809 priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
810 return 0;
811 } else
812 return -EINVAL;
813}
814
/* Work in Progress */
/* Save the VFT (voltage/frequency) table before suspend.  Currently a
 * placeholder.
 * NOTE(review): body is identical to fiji_restore_vft_table — it checks
 * COMPLETED_SAVED and sets COMPLETED_RESTORED, which looks like a
 * copy-paste; a "save" would be expected to move UNSAVED -> SAVED.
 * Confirm intended state transitions before changing behavior.
 */
int fiji_save_vft_table(struct pp_smumgr *smumgr)
{
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);

	if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
		return 0;
	} else
		return -EINVAL;
}
826
/* AVFS state machine, driven once before SMU start (smu_started == false)
 * and once after (smu_started == true).  Each step pre-sets the status to
 * the matching failure value so an early return via PP_ASSERT leaves an
 * accurate record of where the sequence died.
 */
int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);

	switch (priv->avfs.AvfsBtcStatus) {
	case AVFS_BTC_COMPLETED_SAVED: /*S3 State - Pre SMU Start */
		priv->avfs.AvfsBtcStatus = AVFS_BTC_RESTOREVFT_FAILED;
		PP_ASSERT_WITH_CODE(0 == fiji_restore_vft_table(smumgr),
				"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics "
				"Level table over to SMU",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
		break;
	case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
		priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
		PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
				PPSMC_MSG_VftTableIsValid),
				"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
				"correctly to VftTableIsValid Msg",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
		PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
				PPSMC_MSG_EnableAvfs),
				"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
				"correctly to EnableAvfs Message Msg",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
		break;
	case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
		/* The cold-boot calibration can only run once the SMU is up. */
		if (!smu_started)
			break;
		priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
		PP_ASSERT_WITH_CODE(0 == fiji_setup_pm_fuse_for_avfs(smumgr),
				"[AVFS][fiji_avfs_event_mgr] Failure at "
				"fiji_setup_pm_fuse_for_avfs",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_DPMTABLESETUP_FAILED;
		PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
				"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
				" table over to SMU",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
		PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
				"[AVFS][fiji_avfs_event_mgr] Could not setup "
				"Pwr Virus for AVFS ",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
		PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
				"[AVFS][fiji_avfs_event_mgr] Failure at "
				"fiji_start_avfs_btc. AVFS Disabled",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_SAVEVFT_FAILED;
		PP_ASSERT_WITH_CODE(0 == fiji_save_vft_table(smumgr),
				"[AVFS][fiji_avfs_event_mgr] Could not save VFT Table",
				return -1;);
		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
		break;
	case AVFS_BTC_DISABLED: /* Do nothing */
		break;
	case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
		break;
	default:
		printk(KERN_ERR "[AVFS] Something is broken. See log!");
		break;
	}
	return 0;
}
894
/* Top-level SMU bring-up: start the SMC (protected or non-protected path)
 * if it is not already running, run the AVFS state machine around it,
 * gate clocks, cache the soft-register base, and request firmware load.
 */
static int fiji_start_smu(struct pp_smumgr *smumgr)
{
	int result = 0;
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);

	/* Only start SMC if SMC RAM is not running */
	if (!fiji_is_smc_ram_running(smumgr)) {
		/* Pre-start AVFS step (e.g. restore VFT table after S3). */
		fiji_avfs_event_mgr(smumgr, false);

		/* Check if SMU is running in protected mode */
		if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
				CGS_IND_REG__SMC,
				SMU_FIRMWARE, SMU_MODE)) {
			result = fiji_start_smu_in_non_protection_mode(smumgr);
			if (result)
				return result;
		} else {
			result = fiji_start_smu_in_protection_mode(smumgr);
			if (result)
				return result;
		}
		/* Post-start AVFS step (BTC on cold boot, re-enable on S3). */
		fiji_avfs_event_mgr(smumgr, true);
	}

	/* To initialize all clock gating before RLC loaded and running.*/
	cgs_set_clockgating_state(smumgr->device,
			AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
	cgs_set_clockgating_state(smumgr->device,
			AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
	cgs_set_clockgating_state(smumgr->device,
			AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
	cgs_set_clockgating_state(smumgr->device,
			AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);

	/* Setup SoftRegsStart here for register lookup in case
	 * DummyBackEnd is used and ProcessFirmwareHeader is not executed
	 */
	fiji_read_smc_sram_dword(smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, SoftRegisters),
			&(priv->soft_regs_start), 0x40000);

	result = fiji_request_smu_load_fw(smumgr);

	return result;
}
941
942static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
943{
944
945 uint32_t efuse = 0;
946 uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
947
948 if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
949 mask, &efuse)) {
950 if (efuse)
951 return true;
952 }
953 return false;
954}
955
/**
* Initialize the Fiji SMU manager backend: allocate the GPU-visible header
* buffer that will hold the firmware TOC, and decide the initial AVFS
* state from the hardware efuse.
* (The previous comment here was a copy-paste of the SRAM-write doc.)
* @param smumgr the address of the powerplay hardware manager.
* @return 0 on success, -1 if the header buffer could not be mapped.
*/
static int fiji_smu_init(struct pp_smumgr *smumgr)
{
	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
	uint64_t mc_addr;

	/* Round the TOC size up to whole 4 KiB pages. */
	priv->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
	/* NOTE(review): the return value of smu_allocate_memory is not
	 * checked; the PP_ASSERT below only catches a NULL CPU mapping —
	 * confirm that covers all failure modes of the allocator.
	 */
	smu_allocate_memory(smumgr->device,
			priv->header_buffer.data_size,
			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
			PAGE_SIZE,
			&mc_addr,
			&priv->header_buffer.kaddr,
			&priv->header_buffer.handle);

	priv->header = priv->header_buffer.kaddr;
	priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != priv->header),
			"Out of memory.",
			kfree(smumgr->backend);
			cgs_free_gpu_mem(smumgr->device,
			(cgs_handle_t)priv->header_buffer.handle);
			return -1);

	priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
	if (fiji_is_hw_avfs_present(smumgr))
		/* AVFS Parameter
		 * 0 - BTC DC disabled, BTC AC disabled
		 * 1 - BTC DC enabled, BTC AC disabled
		 * 2 - BTC DC disabled, BTC AC enabled
		 * 3 - BTC DC enabled, BTC AC enabled
		 * Default is 0 - BTC DC disabled, BTC AC disabled
		 */
		priv->avfs.AvfsBtcParam = 0;
	else
		priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;

	priv->acpi_optimization = 1;

	return 0;
}
1006
1007static int fiji_smu_fini(struct pp_smumgr *smumgr)
1008{
1009 if (smumgr->backend) {
1010 kfree(smumgr->backend);
1011 smumgr->backend = NULL;
1012 }
1013 return 0;
1014}
1015
/* Fiji implementation of the powerplay SMU manager interface; pptable
 * upload/download is not supported on this ASIC.
 */
static const struct pp_smumgr_func fiji_smu_funcs = {
	.smu_init = &fiji_smu_init,
	.smu_fini = &fiji_smu_fini,
	.start_smu = &fiji_start_smu,
	.check_fw_load_finish = &fiji_check_fw_load_finish,
	.request_smu_load_fw = &fiji_reload_firmware,
	.request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load,
	.send_msg_to_smc = &fiji_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
};
1028
1029int fiji_smum_init(struct pp_smumgr *smumgr)
1030{
1031 struct fiji_smumgr *fiji_smu = NULL;
1032
1033 fiji_smu = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL);
1034
1035 if (fiji_smu == NULL)
1036 return -ENOMEM;
1037
1038 smumgr->backend = fiji_smu;
1039 smumgr->smumgr_funcs = &fiji_smu_funcs;
1040
1041 return 0;
1042}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
new file mode 100644
index 000000000000..8cd22d9c9140
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _FIJI_SMUMANAGER_H_
24#define _FIJI_SMUMANAGER_H_
25
/* Progress/outcome states of the AVFS BTC (boot-time calibration)
 * sequence tracked in struct fiji_smu_avfs.  The C file's init path
 * starts at AVFS_BTC_BOOT and falls back to AVFS_BTC_NOTSUPPORTED when
 * the hardware lacks AVFS support.
 */
enum AVFS_BTC_STATUS {
	AVFS_BTC_BOOT = 0,		/* initial state set at SMU init */
	AVFS_BTC_BOOT_STARTEDSMU,
	AVFS_LOAD_VIRUS,
	AVFS_BTC_VIRUS_LOADED,
	AVFS_BTC_VIRUS_FAIL,
	AVFS_BTC_STARTED,
	AVFS_BTC_FAILED,
	AVFS_BTC_RESTOREVFT_FAILED,
	AVFS_BTC_SAVEVFT_FAILED,
	AVFS_BTC_DPMTABLESETUP_FAILED,
	AVFS_BTC_COMPLETED_UNSAVED,
	AVFS_BTC_COMPLETED_SAVED,
	AVFS_BTC_COMPLETED_RESTORED,
	AVFS_BTC_DISABLED,
	AVFS_BTC_NOTSUPPORTED,		/* hardware has no AVFS */
	AVFS_BTC_SMUMSG_ERROR
};
44
/* AVFS state carried in the Fiji SMU backend. */
struct fiji_smu_avfs {
	enum AVFS_BTC_STATUS AvfsBtcStatus;	/* current BTC state */
	uint32_t AvfsBtcParam;	/* BTC DC/AC enable parameter (0..3) */
};
49
/* Descriptor for one GPU-memory buffer shared with the SMC:
 * MC (GPU) address split into high/low dwords, the CPU mapping,
 * and the cgs allocation handle used to free it.
 */
struct fiji_buffer_entry {
	uint32_t data_size;
	uint32_t mc_addr_low;
	uint32_t mc_addr_high;
	void *kaddr;		/* CPU-visible mapping */
	unsigned long handle;	/* cgs_handle_t of the allocation */
};
57
/* Fiji-specific backend state hung off pp_smumgr::backend. */
struct fiji_smumgr {
	uint8_t *header;		/* CPU pointer to the TOC header buffer */
	uint8_t *mec_image;
	uint32_t soft_regs_start;
	struct fiji_smu_avfs avfs;	/* AVFS BTC bookkeeping */
	uint32_t acpi_optimization;

	struct fiji_buffer_entry header_buffer;
};
67
68int fiji_smum_init(struct pp_smumgr *smumgr);
69int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
70 uint32_t *value, uint32_t limit);
71int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
72 uint32_t value, uint32_t limit);
73int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress,
74 const uint8_t *src, uint32_t byteCount, uint32_t limit);
75
76#endif
77
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
new file mode 100644
index 000000000000..063ae71c9830
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include "pp_instance.h"
27#include "smumgr.h"
28#include "cgs_common.h"
29#include "linux/delay.h"
30#include "cz_smumgr.h"
31#include "tonga_smumgr.h"
32#include "fiji_smumgr.h"
33
/*
 * Create the SMU manager for the chip described by @pp_init and attach
 * it to @handle.
 *
 * Returns 0 on success, -EINVAL for NULL arguments or an unsupported
 * chip, -ENOMEM on allocation failure.
 *
 * Fixes two error-path defects in the original: the unknown-VI-chip
 * branch returned without freeing @smumgr (memory leak), and
 * handle->smu_mgr was published before the backend was known to be
 * valid, leaving a dangling pointer on failure.
 */
int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
	struct pp_smumgr *smumgr;

	if ((handle == NULL) || (pp_init == NULL))
		return -EINVAL;

	smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
	if (smumgr == NULL)
		return -ENOMEM;

	smumgr->device = pp_init->device;
	smumgr->chip_family = pp_init->chip_family;
	smumgr->chip_id = pp_init->chip_id;
	smumgr->hw_revision = pp_init->rev_id;
	smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	smumgr->reload_fw = 1;

	switch (smumgr->chip_family) {
	case AMD_FAMILY_CZ:
		cz_smum_init(smumgr);
		break;
	case AMD_FAMILY_VI:
		switch (smumgr->chip_id) {
		case CHIP_TONGA:
			tonga_smum_init(smumgr);
			break;
		case CHIP_FIJI:
			fiji_smum_init(smumgr);
			break;
		default:
			/* Unknown VI chip: don't leak the manager. */
			kfree(smumgr);
			return -EINVAL;
		}
		break;
	default:
		kfree(smumgr);
		return -EINVAL;
	}

	/* Publish only once the backend has been selected successfully. */
	handle->smu_mgr = smumgr;

	return 0;
}
76
/* Free the SMU manager itself.  NOTE(review): this does not call the
 * backend's smu_fini hook, so callers are expected to have done that
 * (or the backend to have been freed) beforehand — verify at call sites.
 */
int smum_fini(struct pp_smumgr *smumgr)
{
	kfree(smumgr);
	return 0;
}
82
83int smum_get_argument(struct pp_smumgr *smumgr)
84{
85 if (NULL != smumgr->smumgr_funcs->get_argument)
86 return smumgr->smumgr_funcs->get_argument(smumgr);
87
88 return 0;
89}
90
91int smum_download_powerplay_table(struct pp_smumgr *smumgr,
92 void **table)
93{
94 if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
95 return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
96 table);
97
98 return 0;
99}
100
101int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
102{
103 if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
104 return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
105
106 return 0;
107}
108
109int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
110{
111 if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
112 return -EINVAL;
113
114 return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
115}
116
117int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
118 uint16_t msg, uint32_t parameter)
119{
120 if (smumgr == NULL ||
121 smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
122 return -EINVAL;
123 return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
124 smumgr, msg, parameter);
125}
126
127/*
128 * Returns once the part of the register indicated by the mask has
129 * reached the given value.
130 */
131int smum_wait_on_register(struct pp_smumgr *smumgr,
132 uint32_t index,
133 uint32_t value, uint32_t mask)
134{
135 uint32_t i;
136 uint32_t cur_value;
137
138 if (smumgr == NULL || smumgr->device == NULL)
139 return -EINVAL;
140
141 for (i = 0; i < smumgr->usec_timeout; i++) {
142 cur_value = cgs_read_register(smumgr->device, index);
143 if ((cur_value & mask) == (value & mask))
144 break;
145 udelay(1);
146 }
147
148 /* timeout means wrong logic*/
149 if (i == smumgr->usec_timeout)
150 return -1;
151
152 return 0;
153}
154
155int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
156 uint32_t index,
157 uint32_t value, uint32_t mask)
158{
159 uint32_t i;
160 uint32_t cur_value;
161
162 if (smumgr == NULL)
163 return -EINVAL;
164
165 for (i = 0; i < smumgr->usec_timeout; i++) {
166 cur_value = cgs_read_register(smumgr->device,
167 index);
168 if ((cur_value & mask) != (value & mask))
169 break;
170 udelay(1);
171 }
172
173 /* timeout means wrong logic */
174 if (i == smumgr->usec_timeout)
175 return -1;
176
177 return 0;
178}
179
180
181/*
182 * Returns once the part of the register indicated by the mask
183 * has reached the given value.The indirect space is described by
184 * giving the memory-mapped index of the indirect index register.
185 */
186int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
187 uint32_t indirect_port,
188 uint32_t index,
189 uint32_t value,
190 uint32_t mask)
191{
192 if (smumgr == NULL || smumgr->device == NULL)
193 return -EINVAL;
194
195 cgs_write_register(smumgr->device, indirect_port, index);
196 return smum_wait_on_register(smumgr, indirect_port + 1,
197 mask, value);
198}
199
200void smum_wait_for_indirect_register_unequal(
201 struct pp_smumgr *smumgr,
202 uint32_t indirect_port,
203 uint32_t index,
204 uint32_t value,
205 uint32_t mask)
206{
207 if (smumgr == NULL || smumgr->device == NULL)
208 return;
209 cgs_write_register(smumgr->device, indirect_port, index);
210 smum_wait_for_register_unequal(smumgr, indirect_port + 1,
211 value, mask);
212}
213
214int smu_allocate_memory(void *device, uint32_t size,
215 enum cgs_gpu_mem_type type,
216 uint32_t byte_align, uint64_t *mc_addr,
217 void **kptr, void *handle)
218{
219 int ret = 0;
220 cgs_handle_t cgs_handle;
221
222 if (device == NULL || handle == NULL ||
223 mc_addr == NULL || kptr == NULL)
224 return -EINVAL;
225
226 ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
227 0, 0, (cgs_handle_t *)handle);
228 if (ret)
229 return -ENOMEM;
230
231 cgs_handle = *(cgs_handle_t *)handle;
232
233 ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr);
234 if (ret)
235 goto error_gmap;
236
237 ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr);
238 if (ret)
239 goto error_kmap;
240
241 return 0;
242
243error_kmap:
244 cgs_gunmap_gpu_mem(device, cgs_handle);
245
246error_gmap:
247 cgs_free_gpu_mem(device, cgs_handle);
248 return ret;
249}
250
251int smu_free_memory(void *device, void *handle)
252{
253 cgs_handle_t cgs_handle = (cgs_handle_t)handle;
254
255 if (device == NULL || handle == NULL)
256 return -EINVAL;
257
258 cgs_kunmap_gpu_mem(device, cgs_handle);
259 cgs_gunmap_gpu_mem(device, cgs_handle);
260 cgs_free_gpu_mem(device, cgs_handle);
261
262 return 0;
263}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
new file mode 100644
index 000000000000..ebdb43a8daef
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -0,0 +1,819 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include <linux/gfp.h>
27
28#include "smumgr.h"
29#include "tonga_smumgr.h"
30#include "pp_debug.h"
31#include "smu_ucode_xfer_vi.h"
32#include "tonga_ppsmc.h"
33#include "smu/smu_7_1_2_d.h"
34#include "smu/smu_7_1_2_sh_mask.h"
35#include "cgs_common.h"
36
37#define TONGA_SMC_SIZE 0x20000
38#define BUFFER_SIZE 80000
39#define MAX_STRING_SIZE 15
40#define BUFFER_SIZETWO 131072 /*128 *1024*/
41
/**
 * Set the address for subsequent reads/writes of the SMC SRAM space
 * via the SMC_IND_INDEX_0/SMC_IND_DATA_0 pair, with auto-increment
 * disabled.
 * @param smumgr the address of the powerplay hardware manager.
 * @param smcAddress the address in the SMC RAM to access; must be
 *        dword-aligned and below @limit.
 * @return 0 on success, -EINVAL for NULL arguments, -1 on a bad address.
 */
static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr,
				uint32_t smcAddress, uint32_t limit)
{
	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;
	/* The SMC data window is dword wide, so odd addresses are invalid. */
	PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
		"SMC address must be 4 byte aligned.",
		return -1;);

	PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
		"SMC address is beyond the SMC RAM area.",
		return -1;);

	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);

	return 0;
}
65
66/**
67* Copy bytes from an array into the SMC RAM space.
68*
69* @param smumgr the address of the powerplay SMU manager.
70* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
71* @param src the byte array to copy the bytes from.
72* @param byteCount the number of bytes to copy.
73*/
74int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
75 uint32_t smcStartAddress, const uint8_t *src,
76 uint32_t byteCount, uint32_t limit)
77{
78 uint32_t addr;
79 uint32_t data, orig_data;
80 int result = 0;
81 uint32_t extra_shift;
82
83 if (smumgr == NULL || smumgr->device == NULL)
84 return -EINVAL;
85 PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
86 "SMC address must be 4 byte aligned.",
87 return 0;);
88
89 PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
90 "SMC address is beyond the SMC RAM area.",
91 return 0;);
92
93 addr = smcStartAddress;
94
95 while (byteCount >= 4) {
96 /*
97 * Bytes are written into the
98 * SMC address space with the MSB first
99 */
100 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
101
102 result = tonga_set_smc_sram_address(smumgr, addr, limit);
103
104 if (result)
105 goto out;
106
107 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
108
109 src += 4;
110 byteCount -= 4;
111 addr += 4;
112 }
113
114 if (0 != byteCount) {
115 /* Now write odd bytes left, do a read modify write cycle */
116 data = 0;
117
118 result = tonga_set_smc_sram_address(smumgr, addr, limit);
119 if (result)
120 goto out;
121
122 orig_data = cgs_read_register(smumgr->device,
123 mmSMC_IND_DATA_0);
124 extra_shift = 8 * (4 - byteCount);
125
126 while (byteCount > 0) {
127 data = (data << 8) + *src++;
128 byteCount--;
129 }
130
131 data <<= extra_shift;
132 data |= (orig_data & ~((~0UL) << extra_shift));
133
134 result = tonga_set_smc_sram_address(smumgr, addr, limit);
135 if (result)
136 goto out;
137
138 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
139 }
140
141out:
142 return result;
143}
144
145
/* Write the jump instruction at SMC address 0 so the firmware starts
 * executing from offset 0x0.
 *
 * Fix: propagate the result of tonga_copy_bytes_to_smc() instead of
 * discarding it and unconditionally returning 0.
 */
int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
{
	static unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };

	return tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4,
					sizeof(pData) + 1);
}
154
155/**
156* Return if the SMC is currently running.
157*
158* @param smumgr the address of the powerplay hardware manager.
159*/
160static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr)
161{
162 return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
163 SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
164 && (0x20100 <= cgs_read_ind_register(smumgr->device,
165 CGS_IND_REG__SMC, ixSMC_PC_C)));
166}
167
/* Kick the SMU start sequence by sending the Test message with the
 * 0x20000 firmware offset as its argument, waiting for the response
 * register to clear before and after.
 */
static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
{
	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	/* Wait until any previous message has been acknowledged. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);

	/* Wait for the SMC to acknowledge this message. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	return 0;
}
182
/**
 * Send a message to the SMC, and wait for its response.
 *
 * @param smumgr the address of the powerplay hardware manager.
 * @param msg the message to send.
 * @return 0 on success, -EINVAL for NULL arguments, -1 when the SMC is
 *         not running.
 */
static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	if (!tonga_is_smc_ram_running(smumgr))
		return -1;

	/* Wait for (and check) the acknowledgement of any prior message;
	 * a failed check only logs — the new message is sent regardless. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
	PP_ASSERT_WITH_CODE(
		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
		"Failed to send Previous Message.",
		);

	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);

	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
	PP_ASSERT_WITH_CODE(
		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
		"Failed to send Message.",
		);

	return 0;
}
214
/*
 * Send a message to the SMC, and do not wait for its response.
 *
 * @param smumgr the address of the powerplay hardware manager.
 * @param msg the message to send.
 * @return 0 on success, -EINVAL for NULL arguments.
 */
static int tonga_send_msg_to_smc_without_waiting
		(struct pp_smumgr *smumgr, uint16_t msg)
{
	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	/* Still wait for any *previous* message to be acknowledged —
	 * only the acknowledgement of this message is skipped. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
	PP_ASSERT_WITH_CODE(
		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
		"Failed to send Previous Message.",
		);
	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);

	return 0;
}
237
/*
 * Send a message to the SMC with parameter, waiting for the response.
 *
 * @param smumgr: the address of the powerplay hardware manager.
 * @param msg: the message to send.
 * @param parameter: the parameter to send
 * @return 0 on success, -EINVAL for NULL arguments,
 *         PPSMC_Result_Failed when the SMC is not running.
 */
static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
		uint16_t msg, uint32_t parameter)
{
	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	if (!tonga_is_smc_ram_running(smumgr))
		return PPSMC_Result_Failed;

	/* The argument register must be written before the message. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);

	return tonga_send_msg_to_smc(smumgr, msg);
}
260
/*
 * Send a message to the SMC with parameter, do not wait for response.
 *
 * @param smumgr: the address of the powerplay hardware manager.
 * @param msg: the message to send.
 * @param parameter: the parameter to send
 * @return 0 on success, -EINVAL for NULL arguments.
 */
static int tonga_send_msg_to_smc_with_parameter_without_waiting(
		struct pp_smumgr *smumgr,
		uint16_t msg, uint32_t parameter)
{
	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	/* Wait for any prior message before touching the argument reg. */
	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);

	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);

	return tonga_send_msg_to_smc_without_waiting(smumgr, msg);
}
282
283/*
284 * Read a 32bit value from the SMC SRAM space.
285 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
286 * @param smumgr the address of the powerplay hardware manager.
287 * @param smcAddress the address in the SMC RAM to access.
288 * @param value and output parameter for the data read from the SMC SRAM.
289 */
290int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr,
291 uint32_t smcAddress, uint32_t *value,
292 uint32_t limit)
293{
294 int result;
295
296 result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
297
298 if (0 != result)
299 return result;
300
301 *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
302
303 return 0;
304}
305
306/*
307 * Write a 32bit value to the SMC SRAM space.
308 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
309 * @param smumgr the address of the powerplay hardware manager.
310 * @param smcAddress the address in the SMC RAM to access.
311 * @param value to write to the SMC SRAM.
312 */
313int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
314 uint32_t smcAddress, uint32_t value,
315 uint32_t limit)
316{
317 int result;
318
319 result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
320
321 if (0 != result)
322 return result;
323
324 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
325
326 return 0;
327}
328
329static int tonga_smu_fini(struct pp_smumgr *smumgr)
330{
331 if (smumgr->backend != NULL) {
332 kfree(smumgr->backend);
333 smumgr->backend = NULL;
334 }
335 return 0;
336}
337
338static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type)
339{
340 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
341
342 switch (fw_type) {
343 case UCODE_ID_SMU:
344 result = CGS_UCODE_ID_SMU;
345 break;
346 case UCODE_ID_SDMA0:
347 result = CGS_UCODE_ID_SDMA0;
348 break;
349 case UCODE_ID_SDMA1:
350 result = CGS_UCODE_ID_SDMA1;
351 break;
352 case UCODE_ID_CP_CE:
353 result = CGS_UCODE_ID_CP_CE;
354 break;
355 case UCODE_ID_CP_PFP:
356 result = CGS_UCODE_ID_CP_PFP;
357 break;
358 case UCODE_ID_CP_ME:
359 result = CGS_UCODE_ID_CP_ME;
360 break;
361 case UCODE_ID_CP_MEC:
362 result = CGS_UCODE_ID_CP_MEC;
363 break;
364 case UCODE_ID_CP_MEC_JT1:
365 result = CGS_UCODE_ID_CP_MEC_JT1;
366 break;
367 case UCODE_ID_CP_MEC_JT2:
368 result = CGS_UCODE_ID_CP_MEC_JT2;
369 break;
370 case UCODE_ID_RLC_G:
371 result = CGS_UCODE_ID_RLC_G;
372 break;
373 default:
374 break;
375 }
376
377 return result;
378}
379
380/**
381 * Convert the PPIRI firmware type to SMU type mask.
382 * For MEC, we need to check all MEC related type
383*/
384static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType)
385{
386 uint16_t result = 0;
387
388 switch (firmwareType) {
389 case UCODE_ID_SDMA0:
390 result = UCODE_ID_SDMA0_MASK;
391 break;
392 case UCODE_ID_SDMA1:
393 result = UCODE_ID_SDMA1_MASK;
394 break;
395 case UCODE_ID_CP_CE:
396 result = UCODE_ID_CP_CE_MASK;
397 break;
398 case UCODE_ID_CP_PFP:
399 result = UCODE_ID_CP_PFP_MASK;
400 break;
401 case UCODE_ID_CP_ME:
402 result = UCODE_ID_CP_ME_MASK;
403 break;
404 case UCODE_ID_CP_MEC:
405 case UCODE_ID_CP_MEC_JT1:
406 case UCODE_ID_CP_MEC_JT2:
407 result = UCODE_ID_CP_MEC_MASK;
408 break;
409 case UCODE_ID_RLC_G:
410 result = UCODE_ID_RLC_G_MASK;
411 break;
412 default:
413 break;
414 }
415
416 return result;
417}
418
/**
 * Check if the FW has been loaded by polling the SMU's soft-register
 * load-status word for the mask bit(s) of @fwType; the SMU will not
 * set them until loading has finished.
 * Returns 0 when loaded, -EINVAL on poll timeout.
 */
static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
{
	uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType);

	if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
			SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) {
		printk(KERN_ERR "[ powerplay ] check firmware loading failed\n");
		return -EINVAL;
	}

	return 0;
}
435
436/* Populate one firmware image to the data structure */
437static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr,
438 uint16_t firmware_type,
439 struct SMU_Entry *pentry)
440{
441 int result;
442 struct cgs_firmware_info info = {0};
443
444 result = cgs_get_firmware_info(
445 smumgr->device,
446 tonga_convert_fw_type_to_cgs(firmware_type),
447 &info);
448
449 if (result == 0) {
450 pentry->version = 0;
451 pentry->id = (uint16_t)firmware_type;
452 pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
453 pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
454 pentry->meta_data_addr_high = 0;
455 pentry->meta_data_addr_low = 0;
456 pentry->data_size_byte = info.image_size;
457 pentry->num_register_entries = 0;
458
459 if (firmware_type == UCODE_ID_RLC_G)
460 pentry->flags = 1;
461 else
462 pentry->flags = 0;
463 } else {
464 return result;
465 }
466
467 return result;
468}
469
/* Build the firmware TOC in the header buffer and ask the SMC to load
 * every non-SMU firmware image.  Returns 0 on success, -1 if any TOC
 * entry cannot be populated.
 */
static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
{
	struct tonga_smumgr *tonga_smu =
		(struct tonga_smumgr *)(smumgr->backend);
	uint16_t fw_to_load;
	int result = 0;
	struct SMU_DRAMData_TOC *toc;
	/**
	 * First time this gets called during SmuMgr init,
	 * we haven't processed SMU header file yet,
	 * so Soft Register Start offset is unknown.
	 * However, for this case, UcodeLoadStatus is already 0,
	 * so we can skip this if the Soft Registers Start offset is 0.
	 */
	cgs_write_ind_register(smumgr->device,
		CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0);

	/* Tell the SMC where the scratch (SMU) buffer lives. */
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_SMU_DRAM_ADDR_HI,
		tonga_smu->smu_buffer.mc_addr_high);
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_SMU_DRAM_ADDR_LO,
		tonga_smu->smu_buffer.mc_addr_low);

	/* (Re)build the TOC from scratch in the header buffer. */
	toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader;
	toc->num_entries = 0;
	toc->structure_version = 1;

	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry(smumgr,
		UCODE_ID_RLC_G,
		&toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n",
		return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry(smumgr,
		UCODE_ID_CP_CE,
		&toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n",
		return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);

	/* Point the SMC at the TOC we just built. */
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_DRV_DRAM_ADDR_HI,
		tonga_smu->header_buffer.mc_addr_high);
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_DRV_DRAM_ADDR_LO,
		tonga_smu->header_buffer.mc_addr_low);

	/* Note: the JT1/JT2 entries are covered by UCODE_ID_CP_MEC_MASK. */
	fw_to_load = UCODE_ID_RLC_G_MASK
			+ UCODE_ID_SDMA0_MASK
			+ UCODE_ID_SDMA1_MASK
			+ UCODE_ID_CP_CE_MASK
			+ UCODE_ID_CP_ME_MASK
			+ UCODE_ID_CP_PFP_MASK
			+ UCODE_ID_CP_MEC_MASK;

	PP_ASSERT_WITH_CODE(
		0 == tonga_send_msg_to_smc_with_parameter_without_waiting(
		smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
		"Fail to Request SMU Load uCode", return 0);

	return result;
}
561
/* Per-firmware load requests are not needed on Tonga (everything is
 * loaded via tonga_request_smu_reload_fw); this stub satisfies the
 * pp_smumgr_func interface.
 */
static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
				uint32_t firmwareType)
{
	return 0;
}
567
568/**
569 * Upload the SMC firmware to the SMC microcontroller.
570 *
571 * @param smumgr the address of the powerplay hardware manager.
572 * @param pFirmware the data structure containing the various sections of the firmware.
573 */
574static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr)
575{
576 const uint8_t *src;
577 uint32_t byte_count;
578 uint32_t *data;
579 struct cgs_firmware_info info = {0};
580
581 if (smumgr == NULL || smumgr->device == NULL)
582 return -EINVAL;
583
584 cgs_get_firmware_info(smumgr->device,
585 tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
586
587 if (info.image_size & 3) {
588 printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n");
589 return -EINVAL;
590 }
591
592 if (info.image_size > TONGA_SMC_SIZE) {
593 printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n");
594 return -EINVAL;
595 }
596
597 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
598 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
599
600 byte_count = info.image_size;
601 src = (const uint8_t *)info.kptr;
602
603 data = (uint32_t *)src;
604 for (; byte_count >= 4; data++, byte_count -= 4)
605 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
606
607 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
608
609 return 0;
610}
611
/* Boot the SMC when it runs in protected mode: upload the firmware
 * under reset, let the SMU auto-start, then verify the pass/fail
 * status.  The register sequence below is order-critical.
 */
static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
{
	int result;

	/* Assert reset */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
		SMC_SYSCON_RESET_CNTL, rst_reg, 1);

	result = tonga_smu_upload_firmware_image(smumgr);
	if (result)
		return result;

	/* Clear status */
	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
		ixSMU_STATUS, 0);

	/* Enable clock */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
		SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/* De-assert reset */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
		SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* Set SMU Auto Start */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
		SMU_INPUT_DATA, AUTO_START, 1);

	/* Clear firmware interrupt enable flag */
	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
		ixFIRMWARE_FLAGS, 0);

	/* Wait until the SMU reports its interrupts enabled. */
	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
		RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);

	/**
	 * Call Test SMU message with 0x20000 offset to trigger SMU start
	 */
	tonga_send_msg_to_smc_offset(smumgr);

	/* Wait for done bit to be set */
	SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
		SMU_STATUS, SMU_DONE, 0);

	/* Check pass/failed indicator */
	if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
			CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) {
		printk(KERN_ERR "[ powerplay ] SMU Firmware start failed\n");
		return -EINVAL;
	}

	/* Wait for firmware to initialize */
	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
		FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);

	return 0;
}
669
670
/* Boot the SMC when it runs unprotected: upload the firmware under
 * reset, patch in the jump-to-zero start instruction, then release
 * reset and wait for firmware init.  Order-critical sequence.
 */
static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
{
	int result = 0;

	/* wait for smc boot up */
	SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
		RCU_UC_EVENTS, boot_seq_done, 0);

	/*Clear firmware interrupt enable flag*/
	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
		ixFIRMWARE_FLAGS, 0);

	/* Hold the SMC in reset while the image is uploaded. */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
		SMC_SYSCON_RESET_CNTL, rst_reg, 1);

	result = tonga_smu_upload_firmware_image(smumgr);

	if (result != 0)
		return result;

	/* Set smc instruct start point at 0x0 */
	tonga_program_jump_on_start(smumgr);

	/* Ungate the SMC clock before releasing reset. */
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
		SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/*De-assert reset*/
	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
		SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* Wait for firmware to initialize */
	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
		FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);

	return result;
}
709
710static int tonga_start_smu(struct pp_smumgr *smumgr)
711{
712 int result;
713
714 /* Only start SMC if SMC RAM is not running */
715 if (!tonga_is_smc_ram_running(smumgr)) {
716 /*Check if SMU is running in protected mode*/
717 if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
718 SMU_FIRMWARE, SMU_MODE)) {
719 result = tonga_start_in_non_protection_mode(smumgr);
720 if (result)
721 return result;
722 } else {
723 result = tonga_start_in_protection_mode(smumgr);
724 if (result)
725 return result;
726 }
727 }
728
729 result = tonga_request_smu_reload_fw(smumgr);
730
731 return result;
732}
733
/**
 * Allocate and map the GPU buffers the Tonga SMU backend needs: the
 * firmware TOC header buffer and the SMU scratch buffer.
 * (The previous comment here described SRAM writes — copy-paste error.)
 *
 * @param smumgr the address of the powerplay hardware manager.
 * @return 0 on success, -1 when either buffer could not be mapped.
 *
 * NOTE(review): the return values of smu_allocate_memory() are not
 * checked directly; failure is only caught indirectly via the NULL
 * kaddr checks below — confirm this covers all failure modes.
 */
static int tonga_smu_init(struct pp_smumgr *smumgr)
{
	struct tonga_smumgr *tonga_smu;
	uint8_t *internal_buf;
	uint64_t mc_addr = 0;
	/* Allocate memory for backend private data */
	tonga_smu = (struct tonga_smumgr *)(smumgr->backend);
	/* Round the TOC size up to whole 4K pages. */
	tonga_smu->header_buffer.data_size =
		((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
	tonga_smu->smu_buffer.data_size = 200*4096;

	smu_allocate_memory(smumgr->device,
		tonga_smu->header_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&tonga_smu->header_buffer.kaddr,
		&tonga_smu->header_buffer.handle);

	tonga_smu->pHeader = tonga_smu->header_buffer.kaddr;
	tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader),
		"Out of memory.",
		kfree(smumgr->backend);
		cgs_free_gpu_mem(smumgr->device,
		(cgs_handle_t)tonga_smu->header_buffer.handle);
		return -1);

	smu_allocate_memory(smumgr->device,
		tonga_smu->smu_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&tonga_smu->smu_buffer.kaddr,
		&tonga_smu->smu_buffer.handle);

	internal_buf = tonga_smu->smu_buffer.kaddr;
	tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != internal_buf),
		"Out of memory.",
		kfree(smumgr->backend);
		cgs_free_gpu_mem(smumgr->device,
		(cgs_handle_t)tonga_smu->smu_buffer.handle);
		return -1;);

	return 0;
}
792
793static const struct pp_smumgr_func tonga_smu_funcs = {
794 .smu_init = &tonga_smu_init,
795 .smu_fini = &tonga_smu_fini,
796 .start_smu = &tonga_start_smu,
797 .check_fw_load_finish = &tonga_check_fw_load_finish,
798 .request_smu_load_fw = &tonga_request_smu_reload_fw,
799 .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw,
800 .send_msg_to_smc = &tonga_send_msg_to_smc,
801 .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter,
802 .download_pptable_settings = NULL,
803 .upload_pptable_settings = NULL,
804};
805
/* Allocate the Tonga backend and wire up the Tonga function table.
 * Returns 0 on success, -ENOMEM if the backend cannot be allocated.
 */
int tonga_smum_init(struct pp_smumgr *smumgr)
{
	struct tonga_smumgr *backend = kzalloc(sizeof(*backend), GFP_KERNEL);

	if (!backend)
		return -ENOMEM;

	smumgr->backend = backend;
	smumgr->smumgr_funcs = &tonga_smu_funcs;

	return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
new file mode 100644
index 000000000000..33c788d7f05c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _TONGA_SMUMGR_H_
25#define _TONGA_SMUMGR_H_
26
27struct tonga_buffer_entry {
28 uint32_t data_size;
29 uint32_t mc_addr_low;
30 uint32_t mc_addr_high;
31 void *kaddr;
32 unsigned long handle;
33};
34
35struct tonga_smumgr {
36 uint8_t *pHeader;
37 uint8_t *pMecImage;
38 uint32_t ulSoftRegsStart;
39
40 struct tonga_buffer_entry header_buffer;
41 struct tonga_buffer_entry smu_buffer;
42};
43
44extern int tonga_smum_init(struct pp_smumgr *smumgr);
45extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
46 uint32_t smcStartAddress, const uint8_t *src,
47 uint32_t byteCount, uint32_t limit);
48extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
49 uint32_t *value, uint32_t limit);
50extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
51 uint32_t value, uint32_t limit);
52
53#endif
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 3a4820e863ec..8b2becd1aa07 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -47,6 +47,8 @@ static void amd_sched_rq_init(struct amd_sched_rq *rq)
47static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, 47static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
48 struct amd_sched_entity *entity) 48 struct amd_sched_entity *entity)
49{ 49{
50 if (!list_empty(&entity->list))
51 return;
50 spin_lock(&rq->lock); 52 spin_lock(&rq->lock);
51 list_add_tail(&entity->list, &rq->entities); 53 list_add_tail(&entity->list, &rq->entities);
52 spin_unlock(&rq->lock); 54 spin_unlock(&rq->lock);
@@ -55,6 +57,8 @@ static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
55static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, 57static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
56 struct amd_sched_entity *entity) 58 struct amd_sched_entity *entity)
57{ 59{
60 if (list_empty(&entity->list))
61 return;
58 spin_lock(&rq->lock); 62 spin_lock(&rq->lock);
59 list_del_init(&entity->list); 63 list_del_init(&entity->list);
60 if (rq->current_entity == entity) 64 if (rq->current_entity == entity)
@@ -138,9 +142,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
138 atomic_set(&entity->fence_seq, 0); 142 atomic_set(&entity->fence_seq, 0);
139 entity->fence_context = fence_context_alloc(1); 143 entity->fence_context = fence_context_alloc(1);
140 144
141 /* Add the entity to the run queue */
142 amd_sched_rq_add_entity(rq, entity);
143
144 return 0; 145 return 0;
145} 146}
146 147
@@ -302,9 +303,11 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
302 spin_unlock(&entity->queue_lock); 303 spin_unlock(&entity->queue_lock);
303 304
304 /* first job wakes up scheduler */ 305 /* first job wakes up scheduler */
305 if (first) 306 if (first) {
307 /* Add the entity to the run queue */
308 amd_sched_rq_add_entity(entity->rq, entity);
306 amd_sched_wakeup(sched); 309 amd_sched_wakeup(sched);
307 310 }
308 return added; 311 return added;
309} 312}
310 313
@@ -349,14 +352,17 @@ static struct amd_sched_entity *
349amd_sched_select_entity(struct amd_gpu_scheduler *sched) 352amd_sched_select_entity(struct amd_gpu_scheduler *sched)
350{ 353{
351 struct amd_sched_entity *entity; 354 struct amd_sched_entity *entity;
355 int i;
352 356
353 if (!amd_sched_ready(sched)) 357 if (!amd_sched_ready(sched))
354 return NULL; 358 return NULL;
355 359
356 /* Kernel run queue has higher priority than normal run queue*/ 360 /* Kernel run queue has higher priority than normal run queue*/
357 entity = amd_sched_rq_select_entity(&sched->kernel_rq); 361 for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
358 if (entity == NULL) 362 entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
359 entity = amd_sched_rq_select_entity(&sched->sched_rq); 363 if (entity)
364 break;
365 }
360 366
361 return entity; 367 return entity;
362} 368}
@@ -478,12 +484,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
478 struct amd_sched_backend_ops *ops, 484 struct amd_sched_backend_ops *ops,
479 unsigned hw_submission, long timeout, const char *name) 485 unsigned hw_submission, long timeout, const char *name)
480{ 486{
487 int i;
481 sched->ops = ops; 488 sched->ops = ops;
482 sched->hw_submission_limit = hw_submission; 489 sched->hw_submission_limit = hw_submission;
483 sched->name = name; 490 sched->name = name;
484 sched->timeout = timeout; 491 sched->timeout = timeout;
485 amd_sched_rq_init(&sched->sched_rq); 492 for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
486 amd_sched_rq_init(&sched->kernel_rq); 493 amd_sched_rq_init(&sched->sched_rq[i]);
487 494
488 init_waitqueue_head(&sched->wake_up_worker); 495 init_waitqueue_head(&sched->wake_up_worker);
489 init_waitqueue_head(&sched->job_scheduled); 496 init_waitqueue_head(&sched->job_scheduled);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index a0f0ae53aacd..9403145d7bee 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -104,6 +104,12 @@ struct amd_sched_backend_ops {
104 struct fence *(*run_job)(struct amd_sched_job *sched_job); 104 struct fence *(*run_job)(struct amd_sched_job *sched_job);
105}; 105};
106 106
107enum amd_sched_priority {
108 AMD_SCHED_PRIORITY_KERNEL = 0,
109 AMD_SCHED_PRIORITY_NORMAL,
110 AMD_SCHED_MAX_PRIORITY
111};
112
107/** 113/**
108 * One scheduler is implemented for each hardware ring 114 * One scheduler is implemented for each hardware ring
109*/ 115*/
@@ -112,8 +118,7 @@ struct amd_gpu_scheduler {
112 uint32_t hw_submission_limit; 118 uint32_t hw_submission_limit;
113 long timeout; 119 long timeout;
114 const char *name; 120 const char *name;
115 struct amd_sched_rq sched_rq; 121 struct amd_sched_rq sched_rq[AMD_SCHED_MAX_PRIORITY];
116 struct amd_sched_rq kernel_rq;
117 wait_queue_head_t wake_up_worker; 122 wait_queue_head_t wake_up_worker;
118 wait_queue_head_t job_scheduled; 123 wait_queue_head_t job_scheduled;
119 atomic_t hw_rq_count; 124 atomic_t hw_rq_count;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index cebcab560626..0293eb74d777 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -928,11 +928,10 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
928 } 928 }
929 } 929 }
930 930
931 mutex_lock(&dev->struct_mutex);
932 if (dcrtc->cursor_obj) { 931 if (dcrtc->cursor_obj) {
933 dcrtc->cursor_obj->update = NULL; 932 dcrtc->cursor_obj->update = NULL;
934 dcrtc->cursor_obj->update_data = NULL; 933 dcrtc->cursor_obj->update_data = NULL;
935 drm_gem_object_unreference(&dcrtc->cursor_obj->obj); 934 drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
936 } 935 }
937 dcrtc->cursor_obj = obj; 936 dcrtc->cursor_obj = obj;
938 dcrtc->cursor_w = w; 937 dcrtc->cursor_w = w;
@@ -942,14 +941,12 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
942 obj->update_data = dcrtc; 941 obj->update_data = dcrtc;
943 obj->update = cursor_update; 942 obj->update = cursor_update;
944 } 943 }
945 mutex_unlock(&dev->struct_mutex);
946 944
947 return ret; 945 return ret;
948} 946}
949 947
950static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 948static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
951{ 949{
952 struct drm_device *dev = crtc->dev;
953 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 950 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
954 int ret; 951 int ret;
955 952
@@ -957,11 +954,9 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
957 if (!dcrtc->variant->has_spu_adv_reg) 954 if (!dcrtc->variant->has_spu_adv_reg)
958 return -EFAULT; 955 return -EFAULT;
959 956
960 mutex_lock(&dev->struct_mutex);
961 dcrtc->cursor_x = x; 957 dcrtc->cursor_x = x;
962 dcrtc->cursor_y = y; 958 dcrtc->cursor_y = y;
963 ret = armada_drm_crtc_cursor_update(dcrtc, false); 959 ret = armada_drm_crtc_cursor_update(dcrtc, false);
964 mutex_unlock(&dev->struct_mutex);
965 960
966 return ret; 961 return ret;
967} 962}
@@ -972,7 +967,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
972 struct armada_private *priv = crtc->dev->dev_private; 967 struct armada_private *priv = crtc->dev->dev_private;
973 968
974 if (dcrtc->cursor_obj) 969 if (dcrtc->cursor_obj)
975 drm_gem_object_unreference(&dcrtc->cursor_obj->obj); 970 drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
976 971
977 priv->dcrtc[dcrtc->num] = NULL; 972 priv->dcrtc[dcrtc->num] = NULL;
978 drm_crtc_cleanup(&dcrtc->crtc); 973 drm_crtc_cleanup(&dcrtc->crtc);
@@ -1074,7 +1069,7 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
1074 return 0; 1069 return 0;
1075} 1070}
1076 1071
1077static struct drm_crtc_funcs armada_crtc_funcs = { 1072static const struct drm_crtc_funcs armada_crtc_funcs = {
1078 .cursor_set = armada_drm_crtc_cursor_set, 1073 .cursor_set = armada_drm_crtc_cursor_set,
1079 .cursor_move = armada_drm_crtc_cursor_move, 1074 .cursor_move = armada_drm_crtc_cursor_move,
1080 .destroy = armada_drm_crtc_destroy, 1075 .destroy = armada_drm_crtc_destroy,
@@ -1216,14 +1211,14 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1216 &armada_primary_plane_funcs, 1211 &armada_primary_plane_funcs,
1217 armada_primary_formats, 1212 armada_primary_formats,
1218 ARRAY_SIZE(armada_primary_formats), 1213 ARRAY_SIZE(armada_primary_formats),
1219 DRM_PLANE_TYPE_PRIMARY); 1214 DRM_PLANE_TYPE_PRIMARY, NULL);
1220 if (ret) { 1215 if (ret) {
1221 kfree(primary); 1216 kfree(primary);
1222 return ret; 1217 return ret;
1223 } 1218 }
1224 1219
1225 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, 1220 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
1226 &armada_crtc_funcs); 1221 &armada_crtc_funcs, NULL);
1227 if (ret) 1222 if (ret)
1228 goto err_crtc_init; 1223 goto err_crtc_init;
1229 1224
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index 471e45627f1e..d4f7ab0a30d4 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -21,9 +21,9 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
21 struct armada_private *priv = dev->dev_private; 21 struct armada_private *priv = dev->dev_private;
22 int ret; 22 int ret;
23 23
24 mutex_lock(&dev->struct_mutex); 24 mutex_lock(&priv->linear_lock);
25 ret = drm_mm_dump_table(m, &priv->linear); 25 ret = drm_mm_dump_table(m, &priv->linear);
26 mutex_unlock(&dev->struct_mutex); 26 mutex_unlock(&priv->linear_lock);
27 27
28 return ret; 28 return ret;
29} 29}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index 4df6f2af2b21..3b2bb6128d40 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -57,7 +57,8 @@ struct armada_private {
57 DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8); 57 DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
58 struct drm_fb_helper *fbdev; 58 struct drm_fb_helper *fbdev;
59 struct armada_crtc *dcrtc[2]; 59 struct armada_crtc *dcrtc[2];
60 struct drm_mm linear; 60 struct drm_mm linear; /* protected by linear_lock */
61 struct mutex linear_lock;
61 struct drm_property *csc_yuv_prop; 62 struct drm_property *csc_yuv_prop;
62 struct drm_property *csc_rgb_prop; 63 struct drm_property *csc_rgb_prop;
63 struct drm_property *colorkey_prop; 64 struct drm_property *colorkey_prop;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 77ab93d60125..3bd7e1cde99e 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -102,6 +102,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
102 dev->mode_config.preferred_depth = 24; 102 dev->mode_config.preferred_depth = 24;
103 dev->mode_config.funcs = &armada_drm_mode_config_funcs; 103 dev->mode_config.funcs = &armada_drm_mode_config_funcs;
104 drm_mm_init(&priv->linear, mem->start, resource_size(mem)); 104 drm_mm_init(&priv->linear, mem->start, resource_size(mem));
105 mutex_init(&priv->linear_lock);
105 106
106 ret = component_bind_all(dev->dev, dev); 107 ret = component_bind_all(dev->dev, dev);
107 if (ret) 108 if (ret)
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 60a688ef81c7..6e731db31aa4 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -46,22 +46,26 @@ static size_t roundup_gem_size(size_t size)
46 return roundup(size, PAGE_SIZE); 46 return roundup(size, PAGE_SIZE);
47} 47}
48 48
49/* dev->struct_mutex is held here */
50void armada_gem_free_object(struct drm_gem_object *obj) 49void armada_gem_free_object(struct drm_gem_object *obj)
51{ 50{
52 struct armada_gem_object *dobj = drm_to_armada_gem(obj); 51 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
52 struct armada_private *priv = obj->dev->dev_private;
53 53
54 DRM_DEBUG_DRIVER("release obj %p\n", dobj); 54 DRM_DEBUG_DRIVER("release obj %p\n", dobj);
55 55
56 drm_gem_free_mmap_offset(&dobj->obj); 56 drm_gem_free_mmap_offset(&dobj->obj);
57 57
58 might_lock(&priv->linear_lock);
59
58 if (dobj->page) { 60 if (dobj->page) {
59 /* page backed memory */ 61 /* page backed memory */
60 unsigned int order = get_order(dobj->obj.size); 62 unsigned int order = get_order(dobj->obj.size);
61 __free_pages(dobj->page, order); 63 __free_pages(dobj->page, order);
62 } else if (dobj->linear) { 64 } else if (dobj->linear) {
63 /* linear backed memory */ 65 /* linear backed memory */
66 mutex_lock(&priv->linear_lock);
64 drm_mm_remove_node(dobj->linear); 67 drm_mm_remove_node(dobj->linear);
68 mutex_unlock(&priv->linear_lock);
65 kfree(dobj->linear); 69 kfree(dobj->linear);
66 if (dobj->addr) 70 if (dobj->addr)
67 iounmap(dobj->addr); 71 iounmap(dobj->addr);
@@ -144,10 +148,10 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
144 if (!node) 148 if (!node)
145 return -ENOSPC; 149 return -ENOSPC;
146 150
147 mutex_lock(&dev->struct_mutex); 151 mutex_lock(&priv->linear_lock);
148 ret = drm_mm_insert_node(&priv->linear, node, size, align, 152 ret = drm_mm_insert_node(&priv->linear, node, size, align,
149 DRM_MM_SEARCH_DEFAULT); 153 DRM_MM_SEARCH_DEFAULT);
150 mutex_unlock(&dev->struct_mutex); 154 mutex_unlock(&priv->linear_lock);
151 if (ret) { 155 if (ret) {
152 kfree(node); 156 kfree(node);
153 return ret; 157 return ret;
@@ -158,9 +162,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
158 /* Ensure that the memory we're returning is cleared. */ 162 /* Ensure that the memory we're returning is cleared. */
159 ptr = ioremap_wc(obj->linear->start, size); 163 ptr = ioremap_wc(obj->linear->start, size);
160 if (!ptr) { 164 if (!ptr) {
161 mutex_lock(&dev->struct_mutex); 165 mutex_lock(&priv->linear_lock);
162 drm_mm_remove_node(obj->linear); 166 drm_mm_remove_node(obj->linear);
163 mutex_unlock(&dev->struct_mutex); 167 mutex_unlock(&priv->linear_lock);
164 kfree(obj->linear); 168 kfree(obj->linear);
165 obj->linear = NULL; 169 obj->linear = NULL;
166 return -ENOMEM; 170 return -ENOMEM;
@@ -274,18 +278,16 @@ int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
274 struct armada_gem_object *obj; 278 struct armada_gem_object *obj;
275 int ret = 0; 279 int ret = 0;
276 280
277 mutex_lock(&dev->struct_mutex);
278 obj = armada_gem_object_lookup(dev, file, handle); 281 obj = armada_gem_object_lookup(dev, file, handle);
279 if (!obj) { 282 if (!obj) {
280 DRM_ERROR("failed to lookup gem object\n"); 283 DRM_ERROR("failed to lookup gem object\n");
281 ret = -EINVAL; 284 return -EINVAL;
282 goto err_unlock;
283 } 285 }
284 286
285 /* Don't allow imported objects to be mapped */ 287 /* Don't allow imported objects to be mapped */
286 if (obj->obj.import_attach) { 288 if (obj->obj.import_attach) {
287 ret = -EINVAL; 289 ret = -EINVAL;
288 goto err_unlock; 290 goto err_unref;
289 } 291 }
290 292
291 ret = drm_gem_create_mmap_offset(&obj->obj); 293 ret = drm_gem_create_mmap_offset(&obj->obj);
@@ -294,9 +296,8 @@ int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
294 DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset); 296 DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
295 } 297 }
296 298
297 drm_gem_object_unreference(&obj->obj); 299 err_unref:
298 err_unlock: 300 drm_gem_object_unreference_unlocked(&obj->obj);
299 mutex_unlock(&dev->struct_mutex);
300 301
301 return ret; 302 return ret;
302} 303}
@@ -352,13 +353,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
352 return -ENOENT; 353 return -ENOENT;
353 354
354 if (!dobj->obj.filp) { 355 if (!dobj->obj.filp) {
355 drm_gem_object_unreference(&dobj->obj); 356 drm_gem_object_unreference_unlocked(&dobj->obj);
356 return -EINVAL; 357 return -EINVAL;
357 } 358 }
358 359
359 addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE, 360 addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
360 MAP_SHARED, args->offset); 361 MAP_SHARED, args->offset);
361 drm_gem_object_unreference(&dobj->obj); 362 drm_gem_object_unreference_unlocked(&dobj->obj);
362 if (IS_ERR_VALUE(addr)) 363 if (IS_ERR_VALUE(addr))
363 return addr; 364 return addr;
364 365
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 5c22b380f8f3..148e8a42b2c6 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -460,7 +460,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
460 &armada_ovl_plane_funcs, 460 &armada_ovl_plane_funcs,
461 armada_ovl_formats, 461 armada_ovl_formats,
462 ARRAY_SIZE(armada_ovl_formats), 462 ARRAY_SIZE(armada_ovl_formats),
463 DRM_PLANE_TYPE_OVERLAY); 463 DRM_PLANE_TYPE_OVERLAY, NULL);
464 if (ret) { 464 if (ret) {
465 kfree(dplane); 465 kfree(dplane);
466 return ret; 466 return ret;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 69d19f3304a5..0123458cbd83 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -751,7 +751,7 @@ static int ast_encoder_init(struct drm_device *dev)
751 return -ENOMEM; 751 return -ENOMEM;
752 752
753 drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs, 753 drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
754 DRM_MODE_ENCODER_DAC); 754 DRM_MODE_ENCODER_DAC, NULL);
755 drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs); 755 drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
756 756
757 ast_encoder->base.possible_crtcs = 1; 757 ast_encoder->base.possible_crtcs = 1;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9f6e234e7029..468a14f266a7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -344,7 +344,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
344 ret = drm_crtc_init_with_planes(dev, &crtc->base, 344 ret = drm_crtc_init_with_planes(dev, &crtc->base,
345 &planes->primary->base, 345 &planes->primary->base,
346 planes->cursor ? &planes->cursor->base : NULL, 346 planes->cursor ? &planes->cursor->base : NULL,
347 &atmel_hlcdc_crtc_funcs); 347 &atmel_hlcdc_crtc_funcs, NULL);
348 if (ret < 0) 348 if (ret < 0)
349 goto fail; 349 goto fail;
350 350
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 816895447155..a45b32ba029e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -333,6 +333,10 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
333 .data = &atmel_hlcdc_dc_at91sam9x5, 333 .data = &atmel_hlcdc_dc_at91sam9x5,
334 }, 334 },
335 { 335 {
336 .compatible = "atmel,sama5d2-hlcdc",
337 .data = &atmel_hlcdc_dc_sama5d4,
338 },
339 {
336 .compatible = "atmel,sama5d3-hlcdc", 340 .compatible = "atmel,sama5d3-hlcdc",
337 .data = &atmel_hlcdc_dc_sama5d3, 341 .data = &atmel_hlcdc_dc_sama5d3,
338 }, 342 },
@@ -342,6 +346,7 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
342 }, 346 },
343 { /* sentinel */ }, 347 { /* sentinel */ },
344}; 348};
349MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);
345 350
346int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc, 351int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
347 struct drm_display_mode *mode) 352 struct drm_display_mode *mode)
@@ -733,10 +738,6 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
733 if (!ddev) 738 if (!ddev)
734 return -ENOMEM; 739 return -ENOMEM;
735 740
736 ret = drm_dev_set_unique(ddev, dev_name(ddev->dev));
737 if (ret)
738 goto err_unref;
739
740 ret = atmel_hlcdc_dc_load(ddev); 741 ret = atmel_hlcdc_dc_load(ddev);
741 if (ret) 742 if (ret)
742 goto err_unref; 743 goto err_unref;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 067e4c144bd6..0f7ec016e7a9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -146,7 +146,7 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
146 cfg); 146 cfg);
147} 147}
148 148
149static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = { 149static const struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
150 .mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup, 150 .mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
151 .mode_set = atmel_hlcdc_rgb_encoder_mode_set, 151 .mode_set = atmel_hlcdc_rgb_encoder_mode_set,
152 .disable = atmel_hlcdc_panel_encoder_disable, 152 .disable = atmel_hlcdc_panel_encoder_disable,
@@ -192,7 +192,7 @@ atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
192 return &rgb->encoder; 192 return &rgb->encoder;
193} 193}
194 194
195static struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = { 195static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
196 .get_modes = atmel_hlcdc_panel_get_modes, 196 .get_modes = atmel_hlcdc_panel_get_modes,
197 .mode_valid = atmel_hlcdc_rgb_mode_valid, 197 .mode_valid = atmel_hlcdc_rgb_mode_valid,
198 .best_encoder = atmel_hlcdc_rgb_best_encoder, 198 .best_encoder = atmel_hlcdc_rgb_best_encoder,
@@ -256,7 +256,7 @@ static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
256 &atmel_hlcdc_panel_encoder_helper_funcs); 256 &atmel_hlcdc_panel_encoder_helper_funcs);
257 ret = drm_encoder_init(dev, &panel->base.encoder, 257 ret = drm_encoder_init(dev, &panel->base.encoder,
258 &atmel_hlcdc_panel_encoder_funcs, 258 &atmel_hlcdc_panel_encoder_funcs,
259 DRM_MODE_ENCODER_LVDS); 259 DRM_MODE_ENCODER_LVDS, NULL);
260 if (ret) 260 if (ret)
261 return ret; 261 return ret;
262 262
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index d0299aed517e..1ffe9c329c46 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -941,7 +941,7 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
941 ret = drm_universal_plane_init(dev, &plane->base, 0, 941 ret = drm_universal_plane_init(dev, &plane->base, 0,
942 &layer_plane_funcs, 942 &layer_plane_funcs,
943 desc->formats->formats, 943 desc->formats->formats,
944 desc->formats->nformats, type); 944 desc->formats->nformats, type, NULL);
945 if (ret) 945 if (ret)
946 return ERR_PTR(ret); 946 return ERR_PTR(ret);
947 947
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 26bcd03a8cb6..2849f1b95eec 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -119,7 +119,7 @@ static int bochs_crtc_page_flip(struct drm_crtc *crtc,
119 bochs_crtc_mode_set_base(crtc, 0, 0, old_fb); 119 bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
120 if (event) { 120 if (event) {
121 spin_lock_irqsave(&bochs->dev->event_lock, irqflags); 121 spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
122 drm_send_vblank_event(bochs->dev, -1, event); 122 drm_crtc_send_vblank_event(crtc, event);
123 spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags); 123 spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
124 } 124 }
125 return 0; 125 return 0;
@@ -196,7 +196,7 @@ static void bochs_encoder_init(struct drm_device *dev)
196 196
197 encoder->possible_crtcs = 0x1; 197 encoder->possible_crtcs = 0x1;
198 drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs, 198 drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
199 DRM_MODE_ENCODER_DAC); 199 DRM_MODE_ENCODER_DAC, NULL);
200 drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs); 200 drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
201} 201}
202 202
@@ -245,13 +245,13 @@ static enum drm_connector_status bochs_connector_detect(struct drm_connector
245 return connector_status_connected; 245 return connector_status_connected;
246} 246}
247 247
248struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { 248static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
249 .get_modes = bochs_connector_get_modes, 249 .get_modes = bochs_connector_get_modes,
250 .mode_valid = bochs_connector_mode_valid, 250 .mode_valid = bochs_connector_mode_valid,
251 .best_encoder = bochs_connector_best_encoder, 251 .best_encoder = bochs_connector_best_encoder,
252}; 252};
253 253
254struct drm_connector_funcs bochs_connector_connector_funcs = { 254static const struct drm_connector_funcs bochs_connector_connector_funcs = {
255 .dpms = drm_helper_connector_dpms, 255 .dpms = drm_helper_connector_dpms,
256 .detect = bochs_connector_detect, 256 .detect = bochs_connector_detect,
257 .fill_modes = drm_helper_probe_single_connector_modes, 257 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -283,7 +283,7 @@ int bochs_kms_init(struct bochs_device *bochs)
283 bochs->dev->mode_config.preferred_depth = 24; 283 bochs->dev->mode_config.preferred_depth = 24;
284 bochs->dev->mode_config.prefer_shadow = 0; 284 bochs->dev->mode_config.prefer_shadow = 0;
285 285
286 bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs; 286 bochs->dev->mode_config.funcs = &bochs_mode_funcs;
287 287
288 bochs_crtc_init(bochs->dev); 288 bochs_crtc_init(bochs->dev);
289 bochs_encoder_init(bochs->dev); 289 bochs_encoder_init(bochs->dev);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 6dddd392aa42..27e2022de89d 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -22,7 +22,6 @@ config DRM_DW_HDMI_AHB_AUDIO
22 Designware HDMI block. This is used in conjunction with 22 Designware HDMI block. This is used in conjunction with
23 the i.MX6 HDMI driver. 23 the i.MX6 HDMI driver.
24 24
25
26config DRM_NXP_PTN3460 25config DRM_NXP_PTN3460
27 tristate "NXP PTN3460 DP/LVDS bridge" 26 tristate "NXP PTN3460 DP/LVDS bridge"
28 depends on OF 27 depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d4e28beec30e..f13c33d67c03 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,6 +1,6 @@
1ccflags-y := -Iinclude/drm 1ccflags-y := -Iinclude/drm
2 2
3obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o 3obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
4obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw_hdmi-ahb-audio.o 4obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
5obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o 5obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
6obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o 6obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
index 59f630f1c61a..122bb015f4a9 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
@@ -21,7 +21,7 @@
21#include <sound/pcm_drm_eld.h> 21#include <sound/pcm_drm_eld.h>
22#include <sound/pcm_iec958.h> 22#include <sound/pcm_iec958.h>
23 23
24#include "dw_hdmi-audio.h" 24#include "dw-hdmi-audio.h"
25 25
26#define DRIVER_NAME "dw-hdmi-ahb-audio" 26#define DRIVER_NAME "dw-hdmi-ahb-audio"
27 27
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
index 91f631beecc7..91f631beecc7 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 56de9f1c95fc..b0aac4733020 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -22,13 +22,14 @@
22 22
23#include <drm/drm_of.h> 23#include <drm/drm_of.h>
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include <drm/drm_atomic_helper.h>
25#include <drm/drm_crtc_helper.h> 26#include <drm/drm_crtc_helper.h>
26#include <drm/drm_edid.h> 27#include <drm/drm_edid.h>
27#include <drm/drm_encoder_slave.h> 28#include <drm/drm_encoder_slave.h>
28#include <drm/bridge/dw_hdmi.h> 29#include <drm/bridge/dw_hdmi.h>
29 30
30#include "dw_hdmi.h" 31#include "dw-hdmi.h"
31#include "dw_hdmi-audio.h" 32#include "dw-hdmi-audio.h"
32 33
33#define HDMI_EDID_LEN 512 34#define HDMI_EDID_LEN 512
34 35
@@ -1514,7 +1515,7 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
1514 mutex_unlock(&hdmi->mutex); 1515 mutex_unlock(&hdmi->mutex);
1515} 1516}
1516 1517
1517static struct drm_connector_funcs dw_hdmi_connector_funcs = { 1518static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
1518 .dpms = drm_helper_connector_dpms, 1519 .dpms = drm_helper_connector_dpms,
1519 .fill_modes = drm_helper_probe_single_connector_modes, 1520 .fill_modes = drm_helper_probe_single_connector_modes,
1520 .detect = dw_hdmi_connector_detect, 1521 .detect = dw_hdmi_connector_detect,
@@ -1522,13 +1523,24 @@ static struct drm_connector_funcs dw_hdmi_connector_funcs = {
1522 .force = dw_hdmi_connector_force, 1523 .force = dw_hdmi_connector_force,
1523}; 1524};
1524 1525
1525static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { 1526static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
1527 .dpms = drm_atomic_helper_connector_dpms,
1528 .fill_modes = drm_helper_probe_single_connector_modes,
1529 .detect = dw_hdmi_connector_detect,
1530 .destroy = dw_hdmi_connector_destroy,
1531 .force = dw_hdmi_connector_force,
1532 .reset = drm_atomic_helper_connector_reset,
1533 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1534 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1535};
1536
1537static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
1526 .get_modes = dw_hdmi_connector_get_modes, 1538 .get_modes = dw_hdmi_connector_get_modes,
1527 .mode_valid = dw_hdmi_connector_mode_valid, 1539 .mode_valid = dw_hdmi_connector_mode_valid,
1528 .best_encoder = dw_hdmi_connector_best_encoder, 1540 .best_encoder = dw_hdmi_connector_best_encoder,
1529}; 1541};
1530 1542
1531static struct drm_bridge_funcs dw_hdmi_bridge_funcs = { 1543static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
1532 .enable = dw_hdmi_bridge_enable, 1544 .enable = dw_hdmi_bridge_enable,
1533 .disable = dw_hdmi_bridge_disable, 1545 .disable = dw_hdmi_bridge_disable,
1534 .pre_enable = dw_hdmi_bridge_nop, 1546 .pre_enable = dw_hdmi_bridge_nop,
@@ -1645,10 +1657,15 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
1645 1657
1646 drm_connector_helper_add(&hdmi->connector, 1658 drm_connector_helper_add(&hdmi->connector,
1647 &dw_hdmi_connector_helper_funcs); 1659 &dw_hdmi_connector_helper_funcs);
1648 drm_connector_init(drm, &hdmi->connector, &dw_hdmi_connector_funcs,
1649 DRM_MODE_CONNECTOR_HDMIA);
1650 1660
1651 hdmi->connector.encoder = encoder; 1661 if (drm_core_check_feature(drm, DRIVER_ATOMIC))
1662 drm_connector_init(drm, &hdmi->connector,
1663 &dw_hdmi_atomic_connector_funcs,
1664 DRM_MODE_CONNECTOR_HDMIA);
1665 else
1666 drm_connector_init(drm, &hdmi->connector,
1667 &dw_hdmi_connector_funcs,
1668 DRM_MODE_CONNECTOR_HDMIA);
1652 1669
1653 drm_mode_connector_attach_encoder(&hdmi->connector, encoder); 1670 drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
1654 1671
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
index fc9a560429d6..fc9a560429d6 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi.h
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 0ffa3a6a206a..7ecd59f70b8e 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -242,7 +242,7 @@ static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
242 return ptn_bridge->bridge.encoder; 242 return ptn_bridge->bridge.encoder;
243} 243}
244 244
245static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { 245static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
246 .get_modes = ptn3460_get_modes, 246 .get_modes = ptn3460_get_modes,
247 .best_encoder = ptn3460_best_encoder, 247 .best_encoder = ptn3460_best_encoder,
248}; 248};
@@ -258,7 +258,7 @@ static void ptn3460_connector_destroy(struct drm_connector *connector)
258 drm_connector_cleanup(connector); 258 drm_connector_cleanup(connector);
259} 259}
260 260
261static struct drm_connector_funcs ptn3460_connector_funcs = { 261static const struct drm_connector_funcs ptn3460_connector_funcs = {
262 .dpms = drm_atomic_helper_connector_dpms, 262 .dpms = drm_atomic_helper_connector_dpms,
263 .fill_modes = drm_helper_probe_single_connector_modes, 263 .fill_modes = drm_helper_probe_single_connector_modes,
264 .detect = ptn3460_detect, 264 .detect = ptn3460_detect,
@@ -299,7 +299,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge)
299 return ret; 299 return ret;
300} 300}
301 301
302static struct drm_bridge_funcs ptn3460_bridge_funcs = { 302static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
303 .pre_enable = ptn3460_pre_enable, 303 .pre_enable = ptn3460_pre_enable,
304 .enable = ptn3460_enable, 304 .enable = ptn3460_enable,
305 .disable = ptn3460_disable, 305 .disable = ptn3460_disable,
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 61385f2298bf..4a02854a6963 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -489,7 +489,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
489 encoder->possible_crtcs = 0x1; 489 encoder->possible_crtcs = 0x1;
490 490
491 drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs, 491 drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
492 DRM_MODE_ENCODER_DAC); 492 DRM_MODE_ENCODER_DAC, NULL);
493 drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs); 493 drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
494 494
495 return encoder; 495 return encoder;
@@ -533,12 +533,12 @@ static void cirrus_connector_destroy(struct drm_connector *connector)
533 kfree(connector); 533 kfree(connector);
534} 534}
535 535
536struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = { 536static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
537 .get_modes = cirrus_vga_get_modes, 537 .get_modes = cirrus_vga_get_modes,
538 .best_encoder = cirrus_connector_best_encoder, 538 .best_encoder = cirrus_connector_best_encoder,
539}; 539};
540 540
541struct drm_connector_funcs cirrus_vga_connector_funcs = { 541static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
542 .dpms = drm_helper_connector_dpms, 542 .dpms = drm_helper_connector_dpms,
543 .detect = cirrus_vga_detect, 543 .detect = cirrus_vga_detect,
544 .fill_modes = drm_helper_probe_single_connector_modes, 544 .fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index ef5f7663a718..3f74193885f1 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -288,8 +288,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
288 state->crtcs[index] = crtc; 288 state->crtcs[index] = crtc;
289 crtc_state->state = state; 289 crtc_state->state = state;
290 290
291 DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n", 291 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
292 crtc->base.id, crtc_state, state); 292 crtc->base.id, crtc->name, crtc_state, state);
293 293
294 return crtc_state; 294 return crtc_state;
295} 295}
@@ -429,11 +429,20 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
429} 429}
430EXPORT_SYMBOL(drm_atomic_crtc_set_property); 430EXPORT_SYMBOL(drm_atomic_crtc_set_property);
431 431
432/* 432/**
433 * drm_atomic_crtc_get_property - get property value from CRTC state
434 * @crtc: the drm CRTC to set a property on
435 * @state: the state object to get the property value from
436 * @property: the property to set
437 * @val: return location for the property value
438 *
433 * This function handles generic/core properties and calls out to 439 * This function handles generic/core properties and calls out to
434 * driver's ->atomic_get_property() for driver properties. To ensure 440 * driver's ->atomic_get_property() for driver properties. To ensure
435 * consistent behavior you must call this function rather than the 441 * consistent behavior you must call this function rather than the
436 * driver hook directly. 442 * driver hook directly.
443 *
444 * RETURNS:
445 * Zero on success, error code on failure
437 */ 446 */
438static int 447static int
439drm_atomic_crtc_get_property(struct drm_crtc *crtc, 448drm_atomic_crtc_get_property(struct drm_crtc *crtc,
@@ -477,8 +486,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
477 */ 486 */
478 487
479 if (state->active && !state->enable) { 488 if (state->active && !state->enable) {
480 DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n", 489 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
481 crtc->base.id); 490 crtc->base.id, crtc->name);
482 return -EINVAL; 491 return -EINVAL;
483 } 492 }
484 493
@@ -487,14 +496,30 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
487 * be able to trigger. */ 496 * be able to trigger. */
488 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 497 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
489 WARN_ON(state->enable && !state->mode_blob)) { 498 WARN_ON(state->enable && !state->mode_blob)) {
490 DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n", 499 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
491 crtc->base.id); 500 crtc->base.id, crtc->name);
492 return -EINVAL; 501 return -EINVAL;
493 } 502 }
494 503
495 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 504 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
496 WARN_ON(!state->enable && state->mode_blob)) { 505 WARN_ON(!state->enable && state->mode_blob)) {
497 DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n", 506 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
507 crtc->base.id, crtc->name);
508 return -EINVAL;
509 }
510
511 /*
512 * Reject event generation for when a CRTC is off and stays off.
513 * It wouldn't be hard to implement this, but userspace has a track
514 * record of happily burning through 100% cpu (or worse, crash) when the
515 * display pipe is suspended. To avoid all that fun just reject updates
516 * that ask for events since likely that indicates a bug in the
517 * compositor's drawing loop. This is consistent with the vblank IOCTL
518 * and legacy page_flip IOCTL which also reject service on a disabled
519 * pipe.
520 */
521 if (state->event && !state->active && !crtc->state->active) {
522 DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
498 crtc->base.id); 523 crtc->base.id);
499 return -EINVAL; 524 return -EINVAL;
500 } 525 }
@@ -540,8 +565,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
540 state->planes[index] = plane; 565 state->planes[index] = plane;
541 plane_state->state = state; 566 plane_state->state = state;
542 567
543 DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n", 568 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
544 plane->base.id, plane_state, state); 569 plane->base.id, plane->name, plane_state, state);
545 570
546 if (plane_state->crtc) { 571 if (plane_state->crtc) {
547 struct drm_crtc_state *crtc_state; 572 struct drm_crtc_state *crtc_state;
@@ -616,11 +641,20 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
616} 641}
617EXPORT_SYMBOL(drm_atomic_plane_set_property); 642EXPORT_SYMBOL(drm_atomic_plane_set_property);
618 643
619/* 644/**
645 * drm_atomic_plane_get_property - get property value from plane state
646 * @plane: the drm plane to set a property on
647 * @state: the state object to get the property value from
648 * @property: the property to set
649 * @val: return location for the property value
650 *
620 * This function handles generic/core properties and calls out to 651 * This function handles generic/core properties and calls out to
621 * driver's ->atomic_get_property() for driver properties. To ensure 652 * driver's ->atomic_get_property() for driver properties. To ensure
622 * consistent behavior you must call this function rather than the 653 * consistent behavior you must call this function rather than the
623 * driver hook directly. 654 * driver hook directly.
655 *
656 * RETURNS:
657 * Zero on success, error code on failure
624 */ 658 */
625static int 659static int
626drm_atomic_plane_get_property(struct drm_plane *plane, 660drm_atomic_plane_get_property(struct drm_plane *plane,
@@ -752,8 +786,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
752 } 786 }
753 787
754 if (plane_switching_crtc(state->state, plane, state)) { 788 if (plane_switching_crtc(state->state, plane, state)) {
755 DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n", 789 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
756 plane->base.id); 790 plane->base.id, plane->name);
757 return -EINVAL; 791 return -EINVAL;
758 } 792 }
759 793
@@ -872,11 +906,20 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
872} 906}
873EXPORT_SYMBOL(drm_atomic_connector_set_property); 907EXPORT_SYMBOL(drm_atomic_connector_set_property);
874 908
875/* 909/**
910 * drm_atomic_connector_get_property - get property value from connector state
911 * @connector: the drm connector to set a property on
912 * @state: the state object to get the property value from
913 * @property: the property to set
914 * @val: return location for the property value
915 *
876 * This function handles generic/core properties and calls out to 916 * This function handles generic/core properties and calls out to
877 * driver's ->atomic_get_property() for driver properties. To ensure 917 * driver's ->atomic_get_property() for driver properties. To ensure
878 * consistent behavior you must call this function rather than the 918 * consistent behavior you must call this function rather than the
879 * driver hook directly. 919 * driver hook directly.
920 *
921 * RETURNS:
922 * Zero on success, error code on failure
880 */ 923 */
881static int 924static int
882drm_atomic_connector_get_property(struct drm_connector *connector, 925drm_atomic_connector_get_property(struct drm_connector *connector,
@@ -977,8 +1020,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
977 } 1020 }
978 1021
979 if (crtc) 1022 if (crtc)
980 DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n", 1023 DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
981 plane_state, crtc->base.id); 1024 plane_state, crtc->base.id, crtc->name);
982 else 1025 else
983 DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n", 1026 DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
984 plane_state); 1027 plane_state);
@@ -1036,17 +1079,28 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1036{ 1079{
1037 struct drm_crtc_state *crtc_state; 1080 struct drm_crtc_state *crtc_state;
1038 1081
1082 if (conn_state->crtc && conn_state->crtc != crtc) {
1083 crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
1084 conn_state->crtc);
1085
1086 crtc_state->connector_mask &=
1087 ~(1 << drm_connector_index(conn_state->connector));
1088 }
1089
1039 if (crtc) { 1090 if (crtc) {
1040 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); 1091 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1041 if (IS_ERR(crtc_state)) 1092 if (IS_ERR(crtc_state))
1042 return PTR_ERR(crtc_state); 1093 return PTR_ERR(crtc_state);
1094
1095 crtc_state->connector_mask |=
1096 1 << drm_connector_index(conn_state->connector);
1043 } 1097 }
1044 1098
1045 conn_state->crtc = crtc; 1099 conn_state->crtc = crtc;
1046 1100
1047 if (crtc) 1101 if (crtc)
1048 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n", 1102 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
1049 conn_state, crtc->base.id); 1103 conn_state, crtc->base.id, crtc->name);
1050 else 1104 else
1051 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n", 1105 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
1052 conn_state); 1106 conn_state);
@@ -1085,8 +1139,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
1085 if (ret) 1139 if (ret)
1086 return ret; 1140 return ret;
1087 1141
1088 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n", 1142 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
1089 crtc->base.id, state); 1143 crtc->base.id, crtc->name, state);
1090 1144
1091 /* 1145 /*
1092 * Changed connectors are already in @state, so only need to look at the 1146 * Changed connectors are already in @state, so only need to look at the
@@ -1145,35 +1199,6 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1145EXPORT_SYMBOL(drm_atomic_add_affected_planes); 1199EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1146 1200
1147/** 1201/**
1148 * drm_atomic_connectors_for_crtc - count number of connected outputs
1149 * @state: atomic state
1150 * @crtc: DRM crtc
1151 *
1152 * This function counts all connectors which will be connected to @crtc
1153 * according to @state. Useful to recompute the enable state for @crtc.
1154 */
1155int
1156drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
1157 struct drm_crtc *crtc)
1158{
1159 struct drm_connector *connector;
1160 struct drm_connector_state *conn_state;
1161
1162 int i, num_connected_connectors = 0;
1163
1164 for_each_connector_in_state(state, connector, conn_state, i) {
1165 if (conn_state->crtc == crtc)
1166 num_connected_connectors++;
1167 }
1168
1169 DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
1170 state, num_connected_connectors, crtc->base.id);
1171
1172 return num_connected_connectors;
1173}
1174EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
1175
1176/**
1177 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls 1202 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
1178 * @state: atomic state 1203 * @state: atomic state
1179 * 1204 *
@@ -1220,8 +1245,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1220 for_each_plane_in_state(state, plane, plane_state, i) { 1245 for_each_plane_in_state(state, plane, plane_state, i) {
1221 ret = drm_atomic_plane_check(plane, plane_state); 1246 ret = drm_atomic_plane_check(plane, plane_state);
1222 if (ret) { 1247 if (ret) {
1223 DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n", 1248 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
1224 plane->base.id); 1249 plane->base.id, plane->name);
1225 return ret; 1250 return ret;
1226 } 1251 }
1227 } 1252 }
@@ -1229,8 +1254,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1229 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1254 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1230 ret = drm_atomic_crtc_check(crtc, crtc_state); 1255 ret = drm_atomic_crtc_check(crtc, crtc_state);
1231 if (ret) { 1256 if (ret) {
1232 DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n", 1257 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
1233 crtc->base.id); 1258 crtc->base.id, crtc->name);
1234 return ret; 1259 return ret;
1235 } 1260 }
1236 } 1261 }
@@ -1241,8 +1266,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1241 if (!state->allow_modeset) { 1266 if (!state->allow_modeset) {
1242 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1267 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1243 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1268 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
1244 DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n", 1269 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
1245 crtc->base.id); 1270 crtc->base.id, crtc->name);
1246 return -EINVAL; 1271 return -EINVAL;
1247 } 1272 }
1248 } 1273 }
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 74a5fc4deef6..57cccd68ca52 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -52,6 +52,12 @@
52 * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the 52 * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the
53 * various functions to implement set_property callbacks. New drivers must not 53 * various functions to implement set_property callbacks. New drivers must not
54 * implement these functions themselves but must use the provided helpers. 54 * implement these functions themselves but must use the provided helpers.
55 *
56 * The atomic helper uses the same function table structures as all other
57 * modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs,
58 * struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It
59 * also shares the struct &drm_plane_helper_funcs function table with the plane
60 * helpers.
55 */ 61 */
56static void 62static void
57drm_atomic_helper_plane_changed(struct drm_atomic_state *state, 63drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
@@ -82,8 +88,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
82 88
83static bool 89static bool
84check_pending_encoder_assignment(struct drm_atomic_state *state, 90check_pending_encoder_assignment(struct drm_atomic_state *state,
85 struct drm_encoder *new_encoder, 91 struct drm_encoder *new_encoder)
86 struct drm_connector *new_connector)
87{ 92{
88 struct drm_connector *connector; 93 struct drm_connector *connector;
89 struct drm_connector_state *conn_state; 94 struct drm_connector_state *conn_state;
@@ -137,9 +142,9 @@ steal_encoder(struct drm_atomic_state *state,
137 */ 142 */
138 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 143 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
139 144
140 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", 145 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
141 encoder->base.id, encoder->name, 146 encoder->base.id, encoder->name,
142 encoder_crtc->base.id); 147 encoder_crtc->base.id, encoder_crtc->name);
143 148
144 crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc); 149 crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
145 if (IS_ERR(crtc_state)) 150 if (IS_ERR(crtc_state))
@@ -240,17 +245,18 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
240 } 245 }
241 246
242 if (new_encoder == connector_state->best_encoder) { 247 if (new_encoder == connector_state->best_encoder) {
243 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", 248 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
244 connector->base.id, 249 connector->base.id,
245 connector->name, 250 connector->name,
246 new_encoder->base.id, 251 new_encoder->base.id,
247 new_encoder->name, 252 new_encoder->name,
248 connector_state->crtc->base.id); 253 connector_state->crtc->base.id,
254 connector_state->crtc->name);
249 255
250 return 0; 256 return 0;
251 } 257 }
252 258
253 if (!check_pending_encoder_assignment(state, new_encoder, connector)) { 259 if (!check_pending_encoder_assignment(state, new_encoder)) {
254 DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n", 260 DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
255 connector->base.id, 261 connector->base.id,
256 connector->name); 262 connector->name);
@@ -279,12 +285,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
279 crtc_state = state->crtc_states[idx]; 285 crtc_state = state->crtc_states[idx];
280 crtc_state->connectors_changed = true; 286 crtc_state->connectors_changed = true;
281 287
282 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", 288 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
283 connector->base.id, 289 connector->base.id,
284 connector->name, 290 connector->name,
285 new_encoder->base.id, 291 new_encoder->base.id,
286 new_encoder->name, 292 new_encoder->name,
287 connector_state->crtc->base.id); 293 connector_state->crtc->base.id,
294 connector_state->crtc->name);
288 295
289 return 0; 296 return 0;
290} 297}
@@ -368,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
368 ret = funcs->mode_fixup(crtc, &crtc_state->mode, 375 ret = funcs->mode_fixup(crtc, &crtc_state->mode,
369 &crtc_state->adjusted_mode); 376 &crtc_state->adjusted_mode);
370 if (!ret) { 377 if (!ret) {
371 DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n", 378 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
372 crtc->base.id); 379 crtc->base.id, crtc->name);
373 return -EINVAL; 380 return -EINVAL;
374 } 381 }
375 } 382 }
@@ -416,14 +423,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
416 423
417 for_each_crtc_in_state(state, crtc, crtc_state, i) { 424 for_each_crtc_in_state(state, crtc, crtc_state, i) {
418 if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) { 425 if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
419 DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n", 426 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
420 crtc->base.id); 427 crtc->base.id, crtc->name);
421 crtc_state->mode_changed = true; 428 crtc_state->mode_changed = true;
422 } 429 }
423 430
424 if (crtc->state->enable != crtc_state->enable) { 431 if (crtc->state->enable != crtc_state->enable) {
425 DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n", 432 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
426 crtc->base.id); 433 crtc->base.id, crtc->name);
427 434
428 /* 435 /*
429 * For clarity this assignment is done here, but 436 * For clarity this assignment is done here, but
@@ -456,7 +463,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
456 * crtc only changed its mode but has the same set of connectors. 463 * crtc only changed its mode but has the same set of connectors.
457 */ 464 */
458 for_each_crtc_in_state(state, crtc, crtc_state, i) { 465 for_each_crtc_in_state(state, crtc, crtc_state, i) {
459 int num_connectors; 466 bool has_connectors =
467 !!crtc_state->connector_mask;
460 468
461 /* 469 /*
462 * We must set ->active_changed after walking connectors for 470 * We must set ->active_changed after walking connectors for
@@ -464,18 +472,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
464 * a full modeset because update_connector_routing force that. 472 * a full modeset because update_connector_routing force that.
465 */ 473 */
466 if (crtc->state->active != crtc_state->active) { 474 if (crtc->state->active != crtc_state->active) {
467 DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n", 475 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
468 crtc->base.id); 476 crtc->base.id, crtc->name);
469 crtc_state->active_changed = true; 477 crtc_state->active_changed = true;
470 } 478 }
471 479
472 if (!drm_atomic_crtc_needs_modeset(crtc_state)) 480 if (!drm_atomic_crtc_needs_modeset(crtc_state))
473 continue; 481 continue;
474 482
475 DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", 483 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
476 crtc->base.id, 484 crtc->base.id, crtc->name,
477 crtc_state->enable ? 'y' : 'n', 485 crtc_state->enable ? 'y' : 'n',
478 crtc_state->active ? 'y' : 'n'); 486 crtc_state->active ? 'y' : 'n');
479 487
480 ret = drm_atomic_add_affected_connectors(state, crtc); 488 ret = drm_atomic_add_affected_connectors(state, crtc);
481 if (ret != 0) 489 if (ret != 0)
@@ -485,12 +493,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
485 if (ret != 0) 493 if (ret != 0)
486 return ret; 494 return ret;
487 495
488 num_connectors = drm_atomic_connectors_for_crtc(state, 496 if (crtc_state->enable != has_connectors) {
489 crtc); 497 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
490 498 crtc->base.id, crtc->name);
491 if (crtc_state->enable != !!num_connectors) {
492 DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
493 crtc->base.id);
494 499
495 return -EINVAL; 500 return -EINVAL;
496 } 501 }
@@ -537,8 +542,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
537 542
538 ret = funcs->atomic_check(plane, plane_state); 543 ret = funcs->atomic_check(plane, plane_state);
539 if (ret) { 544 if (ret) {
540 DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n", 545 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
541 plane->base.id); 546 plane->base.id, plane->name);
542 return ret; 547 return ret;
543 } 548 }
544 } 549 }
@@ -553,8 +558,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
553 558
554 ret = funcs->atomic_check(crtc, state->crtc_states[i]); 559 ret = funcs->atomic_check(crtc, state->crtc_states[i]);
555 if (ret) { 560 if (ret) {
556 DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n", 561 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
557 crtc->base.id); 562 crtc->base.id, crtc->name);
558 return ret; 563 return ret;
559 } 564 }
560 } 565 }
@@ -667,8 +672,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
667 672
668 funcs = crtc->helper_private; 673 funcs = crtc->helper_private;
669 674
670 DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n", 675 DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
671 crtc->base.id); 676 crtc->base.id, crtc->name);
672 677
673 678
674 /* Right function depends upon target state. */ 679 /* Right function depends upon target state. */
@@ -779,8 +784,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
779 funcs = crtc->helper_private; 784 funcs = crtc->helper_private;
780 785
781 if (crtc->state->enable && funcs->mode_set_nofb) { 786 if (crtc->state->enable && funcs->mode_set_nofb) {
782 DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n", 787 DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
783 crtc->base.id); 788 crtc->base.id, crtc->name);
784 789
785 funcs->mode_set_nofb(crtc); 790 funcs->mode_set_nofb(crtc);
786 } 791 }
@@ -879,8 +884,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
879 funcs = crtc->helper_private; 884 funcs = crtc->helper_private;
880 885
881 if (crtc->state->enable) { 886 if (crtc->state->enable) {
882 DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n", 887 DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
883 crtc->base.id); 888 crtc->base.id, crtc->name);
884 889
885 if (funcs->enable) 890 if (funcs->enable)
886 funcs->enable(crtc); 891 funcs->enable(crtc);
@@ -1747,7 +1752,7 @@ static int update_output_state(struct drm_atomic_state *state,
1747 if (crtc == set->crtc) 1752 if (crtc == set->crtc)
1748 continue; 1753 continue;
1749 1754
1750 if (!drm_atomic_connectors_for_crtc(state, crtc)) { 1755 if (!crtc_state->connector_mask) {
1751 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, 1756 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
1752 NULL); 1757 NULL);
1753 if (ret < 0) 1758 if (ret < 0)
@@ -2277,6 +2282,15 @@ retry:
2277 goto fail; 2282 goto fail;
2278 drm_atomic_set_fb_for_plane(plane_state, fb); 2283 drm_atomic_set_fb_for_plane(plane_state, fb);
2279 2284
2285 /* Make sure we don't accidentally do a full modeset. */
2286 state->allow_modeset = false;
2287 if (!crtc_state->active) {
2288 DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
2289 crtc->base.id);
2290 ret = -EINVAL;
2291 goto fail;
2292 }
2293
2280 ret = drm_atomic_async_commit(state); 2294 ret = drm_atomic_async_commit(state);
2281 if (ret != 0) 2295 if (ret != 0)
2282 goto fail; 2296 goto fail;
@@ -2399,6 +2413,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
2399 * The simpler solution is to just reset the software state to everything off, 2413 * The simpler solution is to just reset the software state to everything off,
2400 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this 2414 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
2401 * the atomic helpers provide default reset implementations for all hooks. 2415 * the atomic helpers provide default reset implementations for all hooks.
2416 *
2417 * On the upside the precise state tracking of atomic simplifies system suspend
2418 * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
2419 * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
2420 * For other drivers the building blocks are split out, see the documentation
2421 * for these functions.
2402 */ 2422 */
2403 2423
2404/** 2424/**
@@ -2593,6 +2613,28 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
2593EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); 2613EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
2594 2614
2595/** 2615/**
2616 * __drm_atomic_helper_connector_reset - reset state on connector
2617 * @connector: drm connector
2618 * @conn_state: connector state to assign
2619 *
2620 * Initializes the newly allocated @conn_state and assigns it to
2621 * #connector ->state, usually required when initializing the drivers
2622 * or when called from the ->reset hook.
2623 *
2624 * This is useful for drivers that subclass the connector state.
2625 */
2626void
2627__drm_atomic_helper_connector_reset(struct drm_connector *connector,
2628 struct drm_connector_state *conn_state)
2629{
2630 if (conn_state)
2631 conn_state->connector = connector;
2632
2633 connector->state = conn_state;
2634}
2635EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
2636
2637/**
2596 * drm_atomic_helper_connector_reset - default ->reset hook for connectors 2638 * drm_atomic_helper_connector_reset - default ->reset hook for connectors
2597 * @connector: drm connector 2639 * @connector: drm connector
2598 * 2640 *
@@ -2602,11 +2644,11 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
2602 */ 2644 */
2603void drm_atomic_helper_connector_reset(struct drm_connector *connector) 2645void drm_atomic_helper_connector_reset(struct drm_connector *connector)
2604{ 2646{
2605 kfree(connector->state); 2647 struct drm_connector_state *conn_state =
2606 connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL); 2648 kzalloc(sizeof(*conn_state), GFP_KERNEL);
2607 2649
2608 if (connector->state) 2650 kfree(connector->state);
2609 connector->state->connector = connector; 2651 __drm_atomic_helper_connector_reset(connector, conn_state);
2610} 2652}
2611EXPORT_SYMBOL(drm_atomic_helper_connector_reset); 2653EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
2612 2654
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 6b8f7211e543..bd93453afa61 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -31,14 +31,14 @@
31/** 31/**
32 * DOC: overview 32 * DOC: overview
33 * 33 *
34 * drm_bridge represents a device that hangs on to an encoder. These are handy 34 * struct &drm_bridge represents a device that hangs on to an encoder. These are
35 * when a regular drm_encoder entity isn't enough to represent the entire 35 * handy when a regular &drm_encoder entity isn't enough to represent the entire
36 * encoder chain. 36 * encoder chain.
37 * 37 *
38 * A bridge is always associated to a single drm_encoder at a time, but can be 38 * A bridge is always attached to a single &drm_encoder at a time, but can be
39 * either connected to it directly, or through an intermediate bridge: 39 * either connected to it directly, or through an intermediate bridge:
40 * 40 *
41 * encoder ---> bridge B ---> bridge A 41 * encoder ---> bridge B ---> bridge A
42 * 42 *
43 * Here, the output of the encoder feeds to bridge B, and that furthers feeds to 43 * Here, the output of the encoder feeds to bridge B, and that furthers feeds to
44 * bridge A. 44 * bridge A.
@@ -46,11 +46,16 @@
46 * The driver using the bridge is responsible to make the associations between 46 * The driver using the bridge is responsible to make the associations between
47 * the encoder and bridges. Once these links are made, the bridges will 47 * the encoder and bridges. Once these links are made, the bridges will
48 * participate along with encoder functions to perform mode_set/enable/disable 48 * participate along with encoder functions to perform mode_set/enable/disable
49 * through the ops provided in drm_bridge_funcs. 49 * through the ops provided in &drm_bridge_funcs.
50 * 50 *
51 * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes, 51 * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
52 * crtcs, encoders or connectors. They just provide additional hooks to get the 52 * CRTCs, encoders or connectors and hence are not visible to userspace. They
53 * desired output at the end of the encoder chain. 53 * just provide additional hooks to get the desired output at the end of the
54 * encoder chain.
55 *
56 * Bridges can also be chained up using the next pointer in struct &drm_bridge.
57 *
58 * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
54 */ 59 */
55 60
56static DEFINE_MUTEX(bridge_lock); 61static DEFINE_MUTEX(bridge_lock);
@@ -122,34 +127,12 @@ EXPORT_SYMBOL(drm_bridge_attach);
122/** 127/**
123 * DOC: bridge callbacks 128 * DOC: bridge callbacks
124 * 129 *
125 * The drm_bridge_funcs ops are populated by the bridge driver. The drm 130 * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
126 * internals(atomic and crtc helpers) use the helpers defined in drm_bridge.c 131 * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
127 * These helpers call a specific drm_bridge_funcs op for all the bridges 132 * These helpers call a specific &drm_bridge_funcs op for all the bridges
128 * during encoder configuration. 133 * during encoder configuration.
129 * 134 *
130 * When creating a bridge driver, one can implement drm_bridge_funcs op with 135 * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
131 * the help of these rough rules:
132 *
133 * pre_enable: this contains things needed to be done for the bridge before
134 * its clock and timings are enabled by its source. For a bridge, its source
135 * is generally the encoder or bridge just before it in the encoder chain.
136 *
137 * enable: this contains things needed to be done for the bridge once its
138 * source is enabled. In other words, enable is called once the source is
139 * ready with clock and timing needed by the bridge.
140 *
141 * disable: this contains things needed to be done for the bridge assuming
142 * that its source is still enabled, i.e. clock and timings are still on.
143 *
144 * post_disable: this contains things needed to be done for the bridge once
145 * its source is disabled, i.e. once clocks and timings are off.
146 *
147 * mode_fixup: this should fixup the given mode for the bridge. It is called
148 * after the encoder's mode fixup. mode_fixup can also reject a mode completely
149 * if it's unsuitable for the hardware.
150 *
151 * mode_set: this sets up the mode for the bridge. It assumes that its source
152 * (an encoder or a bridge) has set the mode too.
153 */ 136 */
154 137
155/** 138/**
@@ -159,7 +142,7 @@ EXPORT_SYMBOL(drm_bridge_attach);
159 * @mode: desired mode to be set for the bridge 142 * @mode: desired mode to be set for the bridge
160 * @adjusted_mode: updated mode that works for this bridge 143 * @adjusted_mode: updated mode that works for this bridge
161 * 144 *
162 * Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the 145 * Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the
163 * encoder chain, starting from the first bridge to the last. 146 * encoder chain, starting from the first bridge to the last.
164 * 147 *
165 * Note: the bridge passed should be the one closest to the encoder 148 * Note: the bridge passed should be the one closest to the encoder
@@ -186,11 +169,11 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
186EXPORT_SYMBOL(drm_bridge_mode_fixup); 169EXPORT_SYMBOL(drm_bridge_mode_fixup);
187 170
188/** 171/**
189 * drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all 172 * drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all
190 * bridges in the encoder chain. 173 * bridges in the encoder chain.
191 * @bridge: bridge control structure 174 * @bridge: bridge control structure
192 * 175 *
193 * Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder 176 * Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder
194 * chain, starting from the last bridge to the first. These are called before 177 * chain, starting from the last bridge to the first. These are called before
195 * calling the encoder's prepare op. 178 * calling the encoder's prepare op.
196 * 179 *
@@ -208,11 +191,11 @@ void drm_bridge_disable(struct drm_bridge *bridge)
208EXPORT_SYMBOL(drm_bridge_disable); 191EXPORT_SYMBOL(drm_bridge_disable);
209 192
210/** 193/**
211 * drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for 194 * drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for
212 * all bridges in the encoder chain. 195 * all bridges in the encoder chain.
213 * @bridge: bridge control structure 196 * @bridge: bridge control structure
214 * 197 *
215 * Calls 'post_disable' drm_bridge_funcs op for all the bridges in the 198 * Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the
216 * encoder chain, starting from the first bridge to the last. These are called 199 * encoder chain, starting from the first bridge to the last. These are called
217 * after completing the encoder's prepare op. 200 * after completing the encoder's prepare op.
218 * 201 *
@@ -236,7 +219,7 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
236 * @mode: desired mode to be set for the bridge 219 * @mode: desired mode to be set for the bridge
237 * @adjusted_mode: updated mode that works for this bridge 220 * @adjusted_mode: updated mode that works for this bridge
238 * 221 *
239 * Calls 'mode_set' drm_bridge_funcs op for all the bridges in the 222 * Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the
240 * encoder chain, starting from the first bridge to the last. 223 * encoder chain, starting from the first bridge to the last.
241 * 224 *
242 * Note: the bridge passed should be the one closest to the encoder 225 * Note: the bridge passed should be the one closest to the encoder
@@ -256,11 +239,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
256EXPORT_SYMBOL(drm_bridge_mode_set); 239EXPORT_SYMBOL(drm_bridge_mode_set);
257 240
258/** 241/**
259 * drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all 242 * drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all
260 * bridges in the encoder chain. 243 * bridges in the encoder chain.
261 * @bridge: bridge control structure 244 * @bridge: bridge control structure
262 * 245 *
263 * Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder 246 * Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder
264 * chain, starting from the last bridge to the first. These are called 247 * chain, starting from the last bridge to the first. These are called
265 * before calling the encoder's commit op. 248 * before calling the encoder's commit op.
266 * 249 *
@@ -278,11 +261,11 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
278EXPORT_SYMBOL(drm_bridge_pre_enable); 261EXPORT_SYMBOL(drm_bridge_pre_enable);
279 262
280/** 263/**
281 * drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges 264 * drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges
282 * in the encoder chain. 265 * in the encoder chain.
283 * @bridge: bridge control structure 266 * @bridge: bridge control structure
284 * 267 *
285 * Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder 268 * Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder
286 * chain, starting from the first bridge to the last. These are called 269 * chain, starting from the first bridge to the last. These are called
287 * after completing the encoder's commit op. 270 * after completing the encoder's commit op.
288 * 271 *
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 32dd134700bd..d40bab29747e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -649,6 +649,18 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
649 649
650DEFINE_WW_CLASS(crtc_ww_class); 650DEFINE_WW_CLASS(crtc_ww_class);
651 651
652static unsigned int drm_num_crtcs(struct drm_device *dev)
653{
654 unsigned int num = 0;
655 struct drm_crtc *tmp;
656
657 drm_for_each_crtc(tmp, dev) {
658 num++;
659 }
660
661 return num;
662}
663
652/** 664/**
653 * drm_crtc_init_with_planes - Initialise a new CRTC object with 665 * drm_crtc_init_with_planes - Initialise a new CRTC object with
654 * specified primary and cursor planes. 666 * specified primary and cursor planes.
@@ -657,6 +669,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
657 * @primary: Primary plane for CRTC 669 * @primary: Primary plane for CRTC
658 * @cursor: Cursor plane for CRTC 670 * @cursor: Cursor plane for CRTC
659 * @funcs: callbacks for the new CRTC 671 * @funcs: callbacks for the new CRTC
672 * @name: printf style format string for the CRTC name, or NULL for default name
660 * 673 *
661 * Inits a new object created as base part of a driver crtc object. 674 * Inits a new object created as base part of a driver crtc object.
662 * 675 *
@@ -666,7 +679,8 @@ DEFINE_WW_CLASS(crtc_ww_class);
666int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, 679int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
667 struct drm_plane *primary, 680 struct drm_plane *primary,
668 struct drm_plane *cursor, 681 struct drm_plane *cursor,
669 const struct drm_crtc_funcs *funcs) 682 const struct drm_crtc_funcs *funcs,
683 const char *name, ...)
670{ 684{
671 struct drm_mode_config *config = &dev->mode_config; 685 struct drm_mode_config *config = &dev->mode_config;
672 int ret; 686 int ret;
@@ -682,6 +696,21 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
682 if (ret) 696 if (ret)
683 return ret; 697 return ret;
684 698
699 if (name) {
700 va_list ap;
701
702 va_start(ap, name);
703 crtc->name = kvasprintf(GFP_KERNEL, name, ap);
704 va_end(ap);
705 } else {
706 crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
707 drm_num_crtcs(dev));
708 }
709 if (!crtc->name) {
710 drm_mode_object_put(dev, &crtc->base);
711 return -ENOMEM;
712 }
713
685 crtc->base.properties = &crtc->properties; 714 crtc->base.properties = &crtc->properties;
686 715
687 list_add_tail(&crtc->head, &config->crtc_list); 716 list_add_tail(&crtc->head, &config->crtc_list);
@@ -728,6 +757,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
728 if (crtc->state && crtc->funcs->atomic_destroy_state) 757 if (crtc->state && crtc->funcs->atomic_destroy_state)
729 crtc->funcs->atomic_destroy_state(crtc, crtc->state); 758 crtc->funcs->atomic_destroy_state(crtc, crtc->state);
730 759
760 kfree(crtc->name);
761
731 memset(crtc, 0, sizeof(*crtc)); 762 memset(crtc, 0, sizeof(*crtc));
732} 763}
733EXPORT_SYMBOL(drm_crtc_cleanup); 764EXPORT_SYMBOL(drm_crtc_cleanup);
@@ -1075,6 +1106,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
1075 * @encoder: the encoder to init 1106 * @encoder: the encoder to init
1076 * @funcs: callbacks for this encoder 1107 * @funcs: callbacks for this encoder
1077 * @encoder_type: user visible type of the encoder 1108 * @encoder_type: user visible type of the encoder
1109 * @name: printf style format string for the encoder name, or NULL for default name
1078 * 1110 *
1079 * Initialises a preallocated encoder. Encoder should be 1111 * Initialises a preallocated encoder. Encoder should be
1080 * subclassed as part of driver encoder objects. 1112 * subclassed as part of driver encoder objects.
@@ -1085,7 +1117,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
1085int drm_encoder_init(struct drm_device *dev, 1117int drm_encoder_init(struct drm_device *dev,
1086 struct drm_encoder *encoder, 1118 struct drm_encoder *encoder,
1087 const struct drm_encoder_funcs *funcs, 1119 const struct drm_encoder_funcs *funcs,
1088 int encoder_type) 1120 int encoder_type, const char *name, ...)
1089{ 1121{
1090 int ret; 1122 int ret;
1091 1123
@@ -1098,9 +1130,17 @@ int drm_encoder_init(struct drm_device *dev,
1098 encoder->dev = dev; 1130 encoder->dev = dev;
1099 encoder->encoder_type = encoder_type; 1131 encoder->encoder_type = encoder_type;
1100 encoder->funcs = funcs; 1132 encoder->funcs = funcs;
1101 encoder->name = kasprintf(GFP_KERNEL, "%s-%d", 1133 if (name) {
1102 drm_encoder_enum_list[encoder_type].name, 1134 va_list ap;
1103 encoder->base.id); 1135
1136 va_start(ap, name);
1137 encoder->name = kvasprintf(GFP_KERNEL, name, ap);
1138 va_end(ap);
1139 } else {
1140 encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
1141 drm_encoder_enum_list[encoder_type].name,
1142 encoder->base.id);
1143 }
1104 if (!encoder->name) { 1144 if (!encoder->name) {
1105 ret = -ENOMEM; 1145 ret = -ENOMEM;
1106 goto out_put; 1146 goto out_put;
@@ -1141,6 +1181,18 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
1141} 1181}
1142EXPORT_SYMBOL(drm_encoder_cleanup); 1182EXPORT_SYMBOL(drm_encoder_cleanup);
1143 1183
1184static unsigned int drm_num_planes(struct drm_device *dev)
1185{
1186 unsigned int num = 0;
1187 struct drm_plane *tmp;
1188
1189 drm_for_each_plane(tmp, dev) {
1190 num++;
1191 }
1192
1193 return num;
1194}
1195
1144/** 1196/**
1145 * drm_universal_plane_init - Initialize a new universal plane object 1197 * drm_universal_plane_init - Initialize a new universal plane object
1146 * @dev: DRM device 1198 * @dev: DRM device
@@ -1150,6 +1202,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
1150 * @formats: array of supported formats (%DRM_FORMAT_*) 1202 * @formats: array of supported formats (%DRM_FORMAT_*)
1151 * @format_count: number of elements in @formats 1203 * @format_count: number of elements in @formats
1152 * @type: type of plane (overlay, primary, cursor) 1204 * @type: type of plane (overlay, primary, cursor)
1205 * @name: printf style format string for the plane name, or NULL for default name
1153 * 1206 *
1154 * Initializes a plane object of type @type. 1207 * Initializes a plane object of type @type.
1155 * 1208 *
@@ -1160,7 +1213,8 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1160 unsigned long possible_crtcs, 1213 unsigned long possible_crtcs,
1161 const struct drm_plane_funcs *funcs, 1214 const struct drm_plane_funcs *funcs,
1162 const uint32_t *formats, unsigned int format_count, 1215 const uint32_t *formats, unsigned int format_count,
1163 enum drm_plane_type type) 1216 enum drm_plane_type type,
1217 const char *name, ...)
1164{ 1218{
1165 struct drm_mode_config *config = &dev->mode_config; 1219 struct drm_mode_config *config = &dev->mode_config;
1166 int ret; 1220 int ret;
@@ -1182,6 +1236,22 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1182 return -ENOMEM; 1236 return -ENOMEM;
1183 } 1237 }
1184 1238
1239 if (name) {
1240 va_list ap;
1241
1242 va_start(ap, name);
1243 plane->name = kvasprintf(GFP_KERNEL, name, ap);
1244 va_end(ap);
1245 } else {
1246 plane->name = kasprintf(GFP_KERNEL, "plane-%d",
1247 drm_num_planes(dev));
1248 }
1249 if (!plane->name) {
1250 kfree(plane->format_types);
1251 drm_mode_object_put(dev, &plane->base);
1252 return -ENOMEM;
1253 }
1254
1185 memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); 1255 memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
1186 plane->format_count = format_count; 1256 plane->format_count = format_count;
1187 plane->possible_crtcs = possible_crtcs; 1257 plane->possible_crtcs = possible_crtcs;
@@ -1240,7 +1310,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
1240 1310
1241 type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 1311 type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
1242 return drm_universal_plane_init(dev, plane, possible_crtcs, funcs, 1312 return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
1243 formats, format_count, type); 1313 formats, format_count, type, NULL);
1244} 1314}
1245EXPORT_SYMBOL(drm_plane_init); 1315EXPORT_SYMBOL(drm_plane_init);
1246 1316
@@ -1272,6 +1342,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
1272 if (plane->state && plane->funcs->atomic_destroy_state) 1342 if (plane->state && plane->funcs->atomic_destroy_state)
1273 plane->funcs->atomic_destroy_state(plane, plane->state); 1343 plane->funcs->atomic_destroy_state(plane, plane->state);
1274 1344
1345 kfree(plane->name);
1346
1275 memset(plane, 0, sizeof(*plane)); 1347 memset(plane, 0, sizeof(*plane));
1276} 1348}
1277EXPORT_SYMBOL(drm_plane_cleanup); 1349EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1801,7 +1873,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1801 copied = 0; 1873 copied = 0;
1802 crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; 1874 crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
1803 drm_for_each_crtc(crtc, dev) { 1875 drm_for_each_crtc(crtc, dev) {
1804 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 1876 DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
1877 crtc->base.id, crtc->name);
1805 if (put_user(crtc->base.id, crtc_id + copied)) { 1878 if (put_user(crtc->base.id, crtc_id + copied)) {
1806 ret = -EFAULT; 1879 ret = -EFAULT;
1807 goto out; 1880 goto out;
@@ -2646,7 +2719,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2646 ret = -ENOENT; 2719 ret = -ENOENT;
2647 goto out; 2720 goto out;
2648 } 2721 }
2649 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 2722 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
2650 2723
2651 if (crtc_req->mode_valid) { 2724 if (crtc_req->mode_valid) {
2652 /* If we have a mode we need a framebuffer. */ 2725 /* If we have a mode we need a framebuffer. */
@@ -4785,9 +4858,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
4785 4858
4786 /* Do DPMS ourselves */ 4859 /* Do DPMS ourselves */
4787 if (property == connector->dev->mode_config.dpms_property) { 4860 if (property == connector->dev->mode_config.dpms_property) {
4788 ret = 0; 4861 ret = (*connector->funcs->dpms)(connector, (int)value);
4789 if (connector->funcs->dpms)
4790 ret = (*connector->funcs->dpms)(connector, (int)value);
4791 } else if (connector->funcs->set_property) 4862 } else if (connector->funcs->set_property)
4792 ret = connector->funcs->set_property(connector, property, value); 4863 ret = connector->funcs->set_property(connector, property, value);
4793 4864
@@ -4983,6 +5054,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
4983{ 5054{
4984 int i; 5055 int i;
4985 5056
5057 /*
5058 * In the past, drivers have attempted to model the static association
5059 * of connector to encoder in simple connector/encoder devices using a
5060 * direct assignment of connector->encoder = encoder. This connection
5061 * is a logical one and the responsibility of the core, so drivers are
5062 * expected not to mess with this.
5063 *
5064 * Note that the error return should've been enough here, but a large
5065 * majority of drivers ignores the return value, so add in a big WARN
5066 * to get people's attention.
5067 */
5068 if (WARN_ON(connector->encoder))
5069 return -EINVAL;
5070
4986 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 5071 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
4987 if (connector->encoder_ids[i] == 0) { 5072 if (connector->encoder_ids[i] == 0) {
4988 connector->encoder_ids[i] = encoder->base.id; 5073 connector->encoder_ids[i] = encoder->base.id;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 10d0989db273..a02a7f9a6a9d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -51,6 +51,11 @@
51 * the same callbacks which drivers can use to e.g. restore the modeset 51 * the same callbacks which drivers can use to e.g. restore the modeset
52 * configuration on resume with drm_helper_resume_force_mode(). 52 * configuration on resume with drm_helper_resume_force_mode().
53 * 53 *
54 * Note that this helper library doesn't track the current power state of CRTCs
55 * and encoders. It can call callbacks like ->dpms() even though the hardware is
56 * already in the desired state. This deficiency has been fixed in the atomic
57 * helpers.
58 *
54 * The driver callbacks are mostly compatible with the atomic modeset helpers, 59 * The driver callbacks are mostly compatible with the atomic modeset helpers,
55 * except for the handling of the primary plane: Atomic helpers require that the 60 * except for the handling of the primary plane: Atomic helpers require that the
56 * primary plane is implemented as a real standalone plane and not directly tied 61 * primary plane is implemented as a real standalone plane and not directly tied
@@ -62,6 +67,11 @@
62 * converting to the plane helpers). New drivers must not use these functions 67 * converting to the plane helpers). New drivers must not use these functions
63 * but need to implement the atomic interface instead, potentially using the 68 * but need to implement the atomic interface instead, potentially using the
64 * atomic helpers for that. 69 * atomic helpers for that.
70 *
71 * These legacy modeset helpers use the same function table structures as
72 * all other modesetting helpers. See the documentation for struct
73 * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
74 * &drm_connector_helper_funcs.
65 */ 75 */
66MODULE_AUTHOR("David Airlie, Jesse Barnes"); 76MODULE_AUTHOR("David Airlie, Jesse Barnes");
67MODULE_DESCRIPTION("DRM KMS helper"); 77MODULE_DESCRIPTION("DRM KMS helper");
@@ -206,8 +216,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
206 * @dev: DRM device 216 * @dev: DRM device
207 * 217 *
208 * This function walks through the entire mode setting configuration of @dev. It 218 * This function walks through the entire mode setting configuration of @dev. It
209 * will remove any crtc links of unused encoders and encoder links of 219 * will remove any CRTC links of unused encoders and encoder links of
210 * disconnected connectors. Then it will disable all unused encoders and crtcs 220 * disconnected connectors. Then it will disable all unused encoders and CRTCs
211 * either by calling their disable callback if available or by calling their 221 * either by calling their disable callback if available or by calling their
212 * dpms callback with DRM_MODE_DPMS_OFF. 222 * dpms callback with DRM_MODE_DPMS_OFF.
213 */ 223 */
@@ -329,7 +339,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
329 DRM_DEBUG_KMS("CRTC fixup failed\n"); 339 DRM_DEBUG_KMS("CRTC fixup failed\n");
330 goto done; 340 goto done;
331 } 341 }
332 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 342 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
333 343
334 crtc->hwmode = *adjusted_mode; 344 crtc->hwmode = *adjusted_mode;
335 345
@@ -445,11 +455,36 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
445 * drm_crtc_helper_set_config - set a new config from userspace 455 * drm_crtc_helper_set_config - set a new config from userspace
446 * @set: mode set configuration 456 * @set: mode set configuration
447 * 457 *
448 * Setup a new configuration, provided by the upper layers (either an ioctl call 458 * The drm_crtc_helper_set_config() helper function implements the set_config
449 * from userspace or internally e.g. from the fbdev support code) in @set, and 459 * callback of struct &drm_crtc_funcs for drivers using the legacy CRTC helpers.
450 * enable it. This is the main helper functions for drivers that implement 460 *
451 * kernel mode setting with the crtc helper functions and the assorted 461 * It first tries to locate the best encoder for each connector by calling the
452 * ->prepare(), ->modeset() and ->commit() helper callbacks. 462 * connector ->best_encoder() (struct &drm_connector_helper_funcs) helper
463 * operation.
464 *
465 * After locating the appropriate encoders, the helper function will call the
466 * mode_fixup encoder and CRTC helper operations to adjust the requested mode,
467 * or reject it completely in which case an error will be returned to the
468 * application. If the new configuration after mode adjustment is identical to
469 * the current configuration the helper function will return without performing
470 * any other operation.
471 *
472 * If the adjusted mode is identical to the current mode but changes to the
473 * frame buffer need to be applied, the drm_crtc_helper_set_config() function
474 * will call the CRTC ->mode_set_base() (struct &drm_crtc_helper_funcs) helper
475 * operation.
476 *
477 * If the adjusted mode differs from the current mode, or if the
478 * ->mode_set_base() helper operation is not provided, the helper function
479 * performs a full mode set sequence by calling the ->prepare(), ->mode_set()
480 * and ->commit() CRTC and encoder helper operations, in that order.
481 * Alternatively it can also use the dpms and disable helper operations. For
482 * details see struct &drm_crtc_helper_funcs and struct
483 * &drm_encoder_helper_funcs.
484 *
485 * This function is deprecated. New drivers must implement atomic modeset
486 * support, for which this function is unsuitable. Instead drivers should use
487 * drm_atomic_helper_set_config().
453 * 488 *
454 * Returns: 489 * Returns:
455 * Returns 0 on success, negative errno numbers on failure. 490 * Returns 0 on success, negative errno numbers on failure.
@@ -484,11 +519,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
484 set->fb = NULL; 519 set->fb = NULL;
485 520
486 if (set->fb) { 521 if (set->fb) {
487 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 522 DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
488 set->crtc->base.id, set->fb->base.id, 523 set->crtc->base.id, set->crtc->name,
489 (int)set->num_connectors, set->x, set->y); 524 set->fb->base.id,
525 (int)set->num_connectors, set->x, set->y);
490 } else { 526 } else {
491 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 527 DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
528 set->crtc->base.id, set->crtc->name);
492 drm_crtc_helper_disable(set->crtc); 529 drm_crtc_helper_disable(set->crtc);
493 return 0; 530 return 0;
494 } 531 }
@@ -628,12 +665,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
628 connector->encoder->crtc = new_crtc; 665 connector->encoder->crtc = new_crtc;
629 } 666 }
630 if (new_crtc) { 667 if (new_crtc) {
631 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 668 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
632 connector->base.id, connector->name, 669 connector->base.id, connector->name,
633 new_crtc->base.id); 670 new_crtc->base.id, new_crtc->name);
634 } else { 671 } else {
635 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 672 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
636 connector->base.id, connector->name); 673 connector->base.id, connector->name);
637 } 674 }
638 } 675 }
639 676
@@ -650,8 +687,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
650 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 687 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
651 set->x, set->y, 688 set->x, set->y,
652 save_set.fb)) { 689 save_set.fb)) {
653 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 690 DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
654 set->crtc->base.id); 691 set->crtc->base.id, set->crtc->name);
655 set->crtc->primary->fb = save_set.fb; 692 set->crtc->primary->fb = save_set.fb;
656 ret = -EINVAL; 693 ret = -EINVAL;
657 goto fail; 694 goto fail;
@@ -758,10 +795,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
758 * @connector: affected connector 795 * @connector: affected connector
759 * @mode: DPMS mode 796 * @mode: DPMS mode
760 * 797 *
761 * This is the main helper function provided by the crtc helper framework for 798 * The drm_helper_connector_dpms() helper function implements the ->dpms()
799 * callback of struct &drm_connector_funcs for drivers using the legacy CRTC helpers.
800 *
801 * This is the main helper function provided by the CRTC helper framework for
762 * implementing the DPMS connector attribute. It computes the new desired DPMS 802 * implementing the DPMS connector attribute. It computes the new desired DPMS
763 * state for all encoders and crtcs in the output mesh and calls the ->dpms() 803 * state for all encoders and CRTCs in the output mesh and calls the ->dpms()
764 * callback provided by the driver appropriately. 804 * callbacks provided by the driver in struct &drm_crtc_helper_funcs and struct
805 * &drm_encoder_helper_funcs appropriately.
806 *
807 * This function is deprecated. New drivers must implement atomic modeset
808 * support, for which this function is unsuitable. Instead drivers should use
809 * drm_atomic_helper_connector_dpms().
765 * 810 *
766 * Returns: 811 * Returns:
767 * Always returns 0. 812 * Always returns 0.
@@ -919,9 +964,9 @@ EXPORT_SYMBOL(drm_helper_resume_force_mode);
919 * @old_fb: previous framebuffer 964 * @old_fb: previous framebuffer
920 * 965 *
921 * This function implements a callback useable as the ->mode_set callback 966 * This function implements a callback useable as the ->mode_set callback
922 * required by the crtc helpers. Besides the atomic plane helper functions for 967 * required by the CRTC helpers. Besides the atomic plane helper functions for
923 * the primary plane the driver must also provide the ->mode_set_nofb callback 968 * the primary plane the driver must also provide the ->mode_set_nofb callback
924 * to set up the crtc. 969 * to set up the CRTC.
925 * 970 *
926 * This is a transitional helper useful for converting drivers to the atomic 971 * This is a transitional helper useful for converting drivers to the atomic
927 * interfaces. 972 * interfaces.
@@ -985,7 +1030,7 @@ EXPORT_SYMBOL(drm_helper_crtc_mode_set);
985 * @old_fb: previous framebuffer 1030 * @old_fb: previous framebuffer
986 * 1031 *
987 * This function implements a callback useable as the ->mode_set_base used 1032 * This function implements a callback useable as the ->mode_set_base used
988 * required by the crtc helpers. The driver must provide the atomic plane helper 1033 * required by the CRTC helpers. The driver must provide the atomic plane helper
989 * functions for the primary plane. 1034 * functions for the primary plane.
990 * 1035 *
991 * This is a transitional helper useful for converting drivers to the atomic 1036 * This is a transitional helper useful for converting drivers to the atomic
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f50eb7b87c2f..6ed90a2437e5 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -978,17 +978,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
978static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, 978static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
979 u8 *rad) 979 u8 *rad)
980{ 980{
981 int lct = port->parent->lct; 981 int parent_lct = port->parent->lct;
982 int shift = 4; 982 int shift = 4;
983 int idx = lct / 2; 983 int idx = (parent_lct - 1) / 2;
984 if (lct > 1) { 984 if (parent_lct > 1) {
985 memcpy(rad, port->parent->rad, idx); 985 memcpy(rad, port->parent->rad, idx + 1);
986 shift = (lct % 2) ? 4 : 0; 986 shift = (parent_lct % 2) ? 4 : 0;
987 } else 987 } else
988 rad[0] = 0; 988 rad[0] = 0;
989 989
990 rad[idx] |= port->port_num << shift; 990 rad[idx] |= port->port_num << shift;
991 return lct + 1; 991 return parent_lct + 1;
992} 992}
993 993
994/* 994/*
@@ -1044,7 +1044,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1044 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 1044 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1045 for (i = 0; i < (mstb->lct - 1); i++) { 1045 for (i = 0; i < (mstb->lct - 1); i++) {
1046 int shift = (i % 2) ? 0 : 4; 1046 int shift = (i % 2) ? 0 : 4;
1047 int port_num = mstb->rad[i / 2] >> shift; 1047 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1048 snprintf(temp, sizeof(temp), "-%d", port_num); 1048 snprintf(temp, sizeof(temp), "-%d", port_num);
1049 strlcat(proppath, temp, proppath_size); 1049 strlcat(proppath, temp, proppath_size);
1050 } 1050 }
@@ -1195,7 +1195,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1195 1195
1196 for (i = 0; i < lct - 1; i++) { 1196 for (i = 0; i < lct - 1; i++) {
1197 int shift = (i % 2) ? 0 : 4; 1197 int shift = (i % 2) ? 0 : 4;
1198 int port_num = rad[i / 2] >> shift; 1198 int port_num = (rad[i / 2] >> shift) & 0xf;
1199 1199
1200 list_for_each_entry(port, &mstb->ports, next) { 1200 list_for_each_entry(port, &mstb->ports, next) {
1201 if (port->port_num == port_num) { 1201 if (port->port_num == port_num) {
@@ -1215,6 +1215,50 @@ out:
1215 return mstb; 1215 return mstb;
1216} 1216}
1217 1217
1218static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
1219 struct drm_dp_mst_branch *mstb,
1220 uint8_t *guid)
1221{
1222 struct drm_dp_mst_branch *found_mstb;
1223 struct drm_dp_mst_port *port;
1224
1225 list_for_each_entry(port, &mstb->ports, next) {
1226 if (!port->mstb)
1227 continue;
1228
1229 if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
1230 return port->mstb;
1231
1232 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
1233
1234 if (found_mstb)
1235 return found_mstb;
1236 }
1237
1238 return NULL;
1239}
1240
1241static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1242 struct drm_dp_mst_topology_mgr *mgr,
1243 uint8_t *guid)
1244{
1245 struct drm_dp_mst_branch *mstb;
1246
1247 /* find the port by iterating down */
1248 mutex_lock(&mgr->lock);
1249
1250 if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
1251 mstb = mgr->mst_primary;
1252 else
1253 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1254
1255 if (mstb)
1256 kref_get(&mstb->kref);
1257
1258 mutex_unlock(&mgr->lock);
1259 return mstb;
1260}
1261
1218static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1262static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1219 struct drm_dp_mst_branch *mstb) 1263 struct drm_dp_mst_branch *mstb)
1220{ 1264{
@@ -1325,6 +1369,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1325 struct drm_dp_sideband_msg_tx *txmsg) 1369 struct drm_dp_sideband_msg_tx *txmsg)
1326{ 1370{
1327 struct drm_dp_mst_branch *mstb = txmsg->dst; 1371 struct drm_dp_mst_branch *mstb = txmsg->dst;
1372 u8 req_type;
1328 1373
1329 /* both msg slots are full */ 1374 /* both msg slots are full */
1330 if (txmsg->seqno == -1) { 1375 if (txmsg->seqno == -1) {
@@ -1341,7 +1386,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1341 txmsg->seqno = 1; 1386 txmsg->seqno = 1;
1342 mstb->tx_slots[txmsg->seqno] = txmsg; 1387 mstb->tx_slots[txmsg->seqno] = txmsg;
1343 } 1388 }
1344 hdr->broadcast = 0; 1389
1390 req_type = txmsg->msg[0] & 0x7f;
1391 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
1392 req_type == DP_RESOURCE_STATUS_NOTIFY)
1393 hdr->broadcast = 1;
1394 else
1395 hdr->broadcast = 0;
1345 hdr->path_msg = txmsg->path_msg; 1396 hdr->path_msg = txmsg->path_msg;
1346 hdr->lct = mstb->lct; 1397 hdr->lct = mstb->lct;
1347 hdr->lcr = mstb->lct - 1; 1398 hdr->lcr = mstb->lct - 1;
@@ -1443,26 +1494,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1443} 1494}
1444 1495
1445/* called holding qlock */ 1496/* called holding qlock */
1446static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1497static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1498 struct drm_dp_sideband_msg_tx *txmsg)
1447{ 1499{
1448 struct drm_dp_sideband_msg_tx *txmsg;
1449 int ret; 1500 int ret;
1450 1501
1451 /* construct a chunk from the first msg in the tx_msg queue */ 1502 /* construct a chunk from the first msg in the tx_msg queue */
1452 if (list_empty(&mgr->tx_msg_upq)) {
1453 mgr->tx_up_in_progress = false;
1454 return;
1455 }
1456
1457 txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
1458 ret = process_single_tx_qlock(mgr, txmsg, true); 1503 ret = process_single_tx_qlock(mgr, txmsg, true);
1459 if (ret == 1) { 1504
1460 /* up txmsgs aren't put in slots - so free after we send it */ 1505 if (ret != 1)
1461 list_del(&txmsg->next);
1462 kfree(txmsg);
1463 } else if (ret)
1464 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 1506 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1465 mgr->tx_up_in_progress = true; 1507
1508 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1466} 1509}
1467 1510
1468static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, 1511static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1683,6 +1726,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1683 if (mgr->proposed_vcpis[i]) { 1726 if (mgr->proposed_vcpis[i]) {
1684 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1727 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1685 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1728 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1729 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
1686 } else { 1730 } else {
1687 port = NULL; 1731 port = NULL;
1688 req_payload.num_slots = 0; 1732 req_payload.num_slots = 0;
@@ -1698,6 +1742,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1698 if (req_payload.num_slots) { 1742 if (req_payload.num_slots) {
1699 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload); 1743 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1700 mgr->payloads[i].num_slots = req_payload.num_slots; 1744 mgr->payloads[i].num_slots = req_payload.num_slots;
1745 mgr->payloads[i].vcpi = req_payload.vcpi;
1701 } else if (mgr->payloads[i].num_slots) { 1746 } else if (mgr->payloads[i].num_slots) {
1702 mgr->payloads[i].num_slots = 0; 1747 mgr->payloads[i].num_slots = 0;
1703 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]); 1748 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
@@ -1833,7 +1878,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
1833{ 1878{
1834 struct drm_dp_sideband_msg_reply_body reply; 1879 struct drm_dp_sideband_msg_reply_body reply;
1835 1880
1836 reply.reply_type = 1; 1881 reply.reply_type = 0;
1837 reply.req_type = req_type; 1882 reply.req_type = req_type;
1838 drm_dp_encode_sideband_reply(&reply, msg); 1883 drm_dp_encode_sideband_reply(&reply, msg);
1839 return 0; 1884 return 0;
@@ -1854,11 +1899,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1854 drm_dp_encode_up_ack_reply(txmsg, req_type); 1899 drm_dp_encode_up_ack_reply(txmsg, req_type);
1855 1900
1856 mutex_lock(&mgr->qlock); 1901 mutex_lock(&mgr->qlock);
1857 list_add_tail(&txmsg->next, &mgr->tx_msg_upq); 1902
1858 if (!mgr->tx_up_in_progress) { 1903 process_single_up_tx_qlock(mgr, txmsg);
1859 process_single_up_tx_qlock(mgr); 1904
1860 }
1861 mutex_unlock(&mgr->qlock); 1905 mutex_unlock(&mgr->qlock);
1906
1907 kfree(txmsg);
1862 return 0; 1908 return 0;
1863} 1909}
1864 1910
@@ -2155,28 +2201,50 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2155 2201
2156 if (mgr->up_req_recv.have_eomt) { 2202 if (mgr->up_req_recv.have_eomt) {
2157 struct drm_dp_sideband_msg_req_body msg; 2203 struct drm_dp_sideband_msg_req_body msg;
2158 struct drm_dp_mst_branch *mstb; 2204 struct drm_dp_mst_branch *mstb = NULL;
2159 bool seqno; 2205 bool seqno;
2160 mstb = drm_dp_get_mst_branch_device(mgr, 2206
2161 mgr->up_req_recv.initial_hdr.lct, 2207 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2162 mgr->up_req_recv.initial_hdr.rad); 2208 mstb = drm_dp_get_mst_branch_device(mgr,
2163 if (!mstb) { 2209 mgr->up_req_recv.initial_hdr.lct,
2164 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2210 mgr->up_req_recv.initial_hdr.rad);
2165 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2211 if (!mstb) {
2166 return 0; 2212 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2213 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2214 return 0;
2215 }
2167 } 2216 }
2168 2217
2169 seqno = mgr->up_req_recv.initial_hdr.seqno; 2218 seqno = mgr->up_req_recv.initial_hdr.seqno;
2170 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); 2219 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2171 2220
2172 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { 2221 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2173 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); 2222 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2223
2224 if (!mstb)
2225 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2226
2227 if (!mstb) {
2228 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2229 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2230 return 0;
2231 }
2232
2174 drm_dp_update_port(mstb, &msg.u.conn_stat); 2233 drm_dp_update_port(mstb, &msg.u.conn_stat);
2175 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2234 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2176 (*mgr->cbs->hotplug)(mgr); 2235 (*mgr->cbs->hotplug)(mgr);
2177 2236
2178 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2237 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2179 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); 2238 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2239 if (!mstb)
2240 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
2241
2242 if (!mstb) {
2243 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2244 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2245 return 0;
2246 }
2247
2180 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); 2248 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2181 } 2249 }
2182 2250
@@ -2768,7 +2836,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2768 mutex_init(&mgr->qlock); 2836 mutex_init(&mgr->qlock);
2769 mutex_init(&mgr->payload_lock); 2837 mutex_init(&mgr->payload_lock);
2770 mutex_init(&mgr->destroy_connector_lock); 2838 mutex_init(&mgr->destroy_connector_lock);
2771 INIT_LIST_HEAD(&mgr->tx_msg_upq);
2772 INIT_LIST_HEAD(&mgr->tx_msg_downq); 2839 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2773 INIT_LIST_HEAD(&mgr->destroy_connector_list); 2840 INIT_LIST_HEAD(&mgr->destroy_connector_list);
2774 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); 2841 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7dd6728dd092..167c8d3d4a31 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -44,10 +44,6 @@ MODULE_AUTHOR(CORE_AUTHOR);
44MODULE_DESCRIPTION(CORE_DESC); 44MODULE_DESCRIPTION(CORE_DESC);
45MODULE_LICENSE("GPL and additional rights"); 45MODULE_LICENSE("GPL and additional rights");
46MODULE_PARM_DESC(debug, "Enable debug output"); 46MODULE_PARM_DESC(debug, "Enable debug output");
47MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
48MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
49MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
50
51module_param_named(debug, drm_debug, int, 0600); 47module_param_named(debug, drm_debug, int, 0600);
52 48
53static DEFINE_SPINLOCK(drm_minor_lock); 49static DEFINE_SPINLOCK(drm_minor_lock);
@@ -633,8 +629,17 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
633 } 629 }
634 } 630 }
635 631
632 if (parent) {
633 ret = drm_dev_set_unique(dev, dev_name(parent));
634 if (ret)
635 goto err_setunique;
636 }
637
636 return dev; 638 return dev;
637 639
640err_setunique:
641 if (drm_core_check_feature(dev, DRIVER_GEM))
642 drm_gem_destroy(dev);
638err_ctxbitmap: 643err_ctxbitmap:
639 drm_legacy_ctxbitmap_cleanup(dev); 644 drm_legacy_ctxbitmap_cleanup(dev);
640 drm_ht_remove(&dev->map_hash); 645 drm_ht_remove(&dev->map_hash);
@@ -797,23 +802,18 @@ EXPORT_SYMBOL(drm_dev_unregister);
797/** 802/**
798 * drm_dev_set_unique - Set the unique name of a DRM device 803 * drm_dev_set_unique - Set the unique name of a DRM device
799 * @dev: device of which to set the unique name 804 * @dev: device of which to set the unique name
800 * @fmt: format string for unique name 805 * @name: unique name
801 * 806 *
802 * Sets the unique name of a DRM device using the specified format string and 807 * Sets the unique name of a DRM device using the specified string. Drivers
803 * a variable list of arguments. Drivers can use this at driver probe time if 808 * can use this at driver probe time if the unique name of the devices they
804 * the unique name of the devices they drive is static. 809 * drive is static.
805 * 810 *
806 * Return: 0 on success or a negative error code on failure. 811 * Return: 0 on success or a negative error code on failure.
807 */ 812 */
808int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...) 813int drm_dev_set_unique(struct drm_device *dev, const char *name)
809{ 814{
810 va_list ap;
811
812 kfree(dev->unique); 815 kfree(dev->unique);
813 816 dev->unique = kstrdup(name, GFP_KERNEL);
814 va_start(ap, fmt);
815 dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
816 va_end(ap);
817 817
818 return dev->unique ? 0 : -ENOMEM; 818 return dev->unique ? 0 : -ENOMEM;
819} 819}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c214f1246cb4..04cb4877fabd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -637,8 +637,12 @@ static const struct minimode extra_modes[] = {
637/* 637/*
638 * Probably taken from CEA-861 spec. 638 * Probably taken from CEA-861 spec.
639 * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. 639 * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
640 *
641 * Index using the VIC.
640 */ 642 */
641static const struct drm_display_mode edid_cea_modes[] = { 643static const struct drm_display_mode edid_cea_modes[] = {
644 /* 0 - dummy, VICs start at 1 */
645 { },
642 /* 1 - 640x480@60Hz */ 646 /* 1 - 640x480@60Hz */
643 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 647 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
644 752, 800, 0, 480, 490, 492, 525, 0, 648 752, 800, 0, 480, 490, 492, 525, 0,
@@ -987,9 +991,11 @@ static const struct drm_display_mode edid_cea_modes[] = {
987}; 991};
988 992
989/* 993/*
990 * HDMI 1.4 4k modes. 994 * HDMI 1.4 4k modes. Index using the VIC.
991 */ 995 */
992static const struct drm_display_mode edid_4k_modes[] = { 996static const struct drm_display_mode edid_4k_modes[] = {
997 /* 0 - dummy, VICs start at 1 */
998 { },
993 /* 1 - 3840x2160@30Hz */ 999 /* 1 - 3840x2160@30Hz */
994 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 1000 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
995 3840, 4016, 4104, 4400, 0, 1001 3840, 4016, 4104, 4400, 0,
@@ -2548,13 +2554,13 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
2548static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, 2554static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
2549 unsigned int clock_tolerance) 2555 unsigned int clock_tolerance)
2550{ 2556{
2551 u8 mode; 2557 u8 vic;
2552 2558
2553 if (!to_match->clock) 2559 if (!to_match->clock)
2554 return 0; 2560 return 0;
2555 2561
2556 for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { 2562 for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
2557 const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; 2563 const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
2558 unsigned int clock1, clock2; 2564 unsigned int clock1, clock2;
2559 2565
2560 /* Check both 60Hz and 59.94Hz */ 2566 /* Check both 60Hz and 59.94Hz */
@@ -2566,7 +2572,7 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
2566 continue; 2572 continue;
2567 2573
2568 if (drm_mode_equal_no_clocks(to_match, cea_mode)) 2574 if (drm_mode_equal_no_clocks(to_match, cea_mode))
2569 return mode + 1; 2575 return vic;
2570 } 2576 }
2571 2577
2572 return 0; 2578 return 0;
@@ -2581,13 +2587,13 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
2581 */ 2587 */
2582u8 drm_match_cea_mode(const struct drm_display_mode *to_match) 2588u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2583{ 2589{
2584 u8 mode; 2590 u8 vic;
2585 2591
2586 if (!to_match->clock) 2592 if (!to_match->clock)
2587 return 0; 2593 return 0;
2588 2594
2589 for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { 2595 for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
2590 const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; 2596 const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
2591 unsigned int clock1, clock2; 2597 unsigned int clock1, clock2;
2592 2598
2593 /* Check both 60Hz and 59.94Hz */ 2599 /* Check both 60Hz and 59.94Hz */
@@ -2597,12 +2603,17 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2597 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || 2603 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2598 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && 2604 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2599 drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode)) 2605 drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
2600 return mode + 1; 2606 return vic;
2601 } 2607 }
2602 return 0; 2608 return 0;
2603} 2609}
2604EXPORT_SYMBOL(drm_match_cea_mode); 2610EXPORT_SYMBOL(drm_match_cea_mode);
2605 2611
2612static bool drm_valid_cea_vic(u8 vic)
2613{
2614 return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
2615}
2616
2606/** 2617/**
2607 * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to 2618 * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
2608 * the input VIC from the CEA mode list 2619 * the input VIC from the CEA mode list
@@ -2612,10 +2623,7 @@ EXPORT_SYMBOL(drm_match_cea_mode);
2612 */ 2623 */
2613enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code) 2624enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
2614{ 2625{
2615 /* return picture aspect ratio for video_code - 1 to access the 2626 return edid_cea_modes[video_code].picture_aspect_ratio;
2616 * right array element
2617 */
2618 return edid_cea_modes[video_code-1].picture_aspect_ratio;
2619} 2627}
2620EXPORT_SYMBOL(drm_get_cea_aspect_ratio); 2628EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
2621 2629
@@ -2639,13 +2647,13 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
2639static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match, 2647static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
2640 unsigned int clock_tolerance) 2648 unsigned int clock_tolerance)
2641{ 2649{
2642 u8 mode; 2650 u8 vic;
2643 2651
2644 if (!to_match->clock) 2652 if (!to_match->clock)
2645 return 0; 2653 return 0;
2646 2654
2647 for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) { 2655 for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
2648 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; 2656 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
2649 unsigned int clock1, clock2; 2657 unsigned int clock1, clock2;
2650 2658
2651 /* Make sure to also match alternate clocks */ 2659 /* Make sure to also match alternate clocks */
@@ -2657,7 +2665,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
2657 continue; 2665 continue;
2658 2666
2659 if (drm_mode_equal_no_clocks(to_match, hdmi_mode)) 2667 if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
2660 return mode + 1; 2668 return vic;
2661 } 2669 }
2662 2670
2663 return 0; 2671 return 0;
@@ -2673,13 +2681,13 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
2673 */ 2681 */
2674static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) 2682static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
2675{ 2683{
2676 u8 mode; 2684 u8 vic;
2677 2685
2678 if (!to_match->clock) 2686 if (!to_match->clock)
2679 return 0; 2687 return 0;
2680 2688
2681 for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) { 2689 for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
2682 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; 2690 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
2683 unsigned int clock1, clock2; 2691 unsigned int clock1, clock2;
2684 2692
2685 /* Make sure to also match alternate clocks */ 2693 /* Make sure to also match alternate clocks */
@@ -2689,11 +2697,16 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
2689 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || 2697 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2690 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && 2698 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2691 drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode)) 2699 drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
2692 return mode + 1; 2700 return vic;
2693 } 2701 }
2694 return 0; 2702 return 0;
2695} 2703}
2696 2704
2705static bool drm_valid_hdmi_vic(u8 vic)
2706{
2707 return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes);
2708}
2709
2697static int 2710static int
2698add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) 2711add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2699{ 2712{
@@ -2713,16 +2726,16 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2713 list_for_each_entry(mode, &connector->probed_modes, head) { 2726 list_for_each_entry(mode, &connector->probed_modes, head) {
2714 const struct drm_display_mode *cea_mode = NULL; 2727 const struct drm_display_mode *cea_mode = NULL;
2715 struct drm_display_mode *newmode; 2728 struct drm_display_mode *newmode;
2716 u8 mode_idx = drm_match_cea_mode(mode) - 1; 2729 u8 vic = drm_match_cea_mode(mode);
2717 unsigned int clock1, clock2; 2730 unsigned int clock1, clock2;
2718 2731
2719 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { 2732 if (drm_valid_cea_vic(vic)) {
2720 cea_mode = &edid_cea_modes[mode_idx]; 2733 cea_mode = &edid_cea_modes[vic];
2721 clock2 = cea_mode_alternate_clock(cea_mode); 2734 clock2 = cea_mode_alternate_clock(cea_mode);
2722 } else { 2735 } else {
2723 mode_idx = drm_match_hdmi_mode(mode) - 1; 2736 vic = drm_match_hdmi_mode(mode);
2724 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { 2737 if (drm_valid_hdmi_vic(vic)) {
2725 cea_mode = &edid_4k_modes[mode_idx]; 2738 cea_mode = &edid_4k_modes[vic];
2726 clock2 = hdmi_mode_alternate_clock(cea_mode); 2739 clock2 = hdmi_mode_alternate_clock(cea_mode);
2727 } 2740 }
2728 } 2741 }
@@ -2773,17 +2786,17 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
2773{ 2786{
2774 struct drm_device *dev = connector->dev; 2787 struct drm_device *dev = connector->dev;
2775 struct drm_display_mode *newmode; 2788 struct drm_display_mode *newmode;
2776 u8 cea_mode; 2789 u8 vic;
2777 2790
2778 if (video_db == NULL || video_index >= video_len) 2791 if (video_db == NULL || video_index >= video_len)
2779 return NULL; 2792 return NULL;
2780 2793
2781 /* CEA modes are numbered 1..127 */ 2794 /* CEA modes are numbered 1..127 */
2782 cea_mode = (video_db[video_index] & 127) - 1; 2795 vic = (video_db[video_index] & 127);
2783 if (cea_mode >= ARRAY_SIZE(edid_cea_modes)) 2796 if (!drm_valid_cea_vic(vic))
2784 return NULL; 2797 return NULL;
2785 2798
2786 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); 2799 newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
2787 if (!newmode) 2800 if (!newmode)
2788 return NULL; 2801 return NULL;
2789 2802
@@ -2878,8 +2891,7 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
2878 struct drm_device *dev = connector->dev; 2891 struct drm_device *dev = connector->dev;
2879 struct drm_display_mode *newmode; 2892 struct drm_display_mode *newmode;
2880 2893
2881 vic--; /* VICs start at 1 */ 2894 if (!drm_valid_hdmi_vic(vic)) {
2882 if (vic >= ARRAY_SIZE(edid_4k_modes)) {
2883 DRM_ERROR("Unknown HDMI VIC: %d\n", vic); 2895 DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
2884 return 0; 2896 return 0;
2885 } 2897 }
@@ -3170,24 +3182,24 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
3170{ 3182{
3171 const struct drm_display_mode *cea_mode; 3183 const struct drm_display_mode *cea_mode;
3172 int clock1, clock2, clock; 3184 int clock1, clock2, clock;
3173 u8 mode_idx; 3185 u8 vic;
3174 const char *type; 3186 const char *type;
3175 3187
3176 /* 3188 /*
3177 * allow 5kHz clock difference either way to account for 3189 * allow 5kHz clock difference either way to account for
3178 * the 10kHz clock resolution limit of detailed timings. 3190 * the 10kHz clock resolution limit of detailed timings.
3179 */ 3191 */
3180 mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1; 3192 vic = drm_match_cea_mode_clock_tolerance(mode, 5);
3181 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { 3193 if (drm_valid_cea_vic(vic)) {
3182 type = "CEA"; 3194 type = "CEA";
3183 cea_mode = &edid_cea_modes[mode_idx]; 3195 cea_mode = &edid_cea_modes[vic];
3184 clock1 = cea_mode->clock; 3196 clock1 = cea_mode->clock;
3185 clock2 = cea_mode_alternate_clock(cea_mode); 3197 clock2 = cea_mode_alternate_clock(cea_mode);
3186 } else { 3198 } else {
3187 mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1; 3199 vic = drm_match_hdmi_mode_clock_tolerance(mode, 5);
3188 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { 3200 if (drm_valid_hdmi_vic(vic)) {
3189 type = "HDMI"; 3201 type = "HDMI";
3190 cea_mode = &edid_4k_modes[mode_idx]; 3202 cea_mode = &edid_4k_modes[vic];
3191 clock1 = cea_mode->clock; 3203 clock1 = cea_mode->clock;
3192 clock2 = hdmi_mode_alternate_clock(cea_mode); 3204 clock2 = hdmi_mode_alternate_clock(cea_mode);
3193 } else { 3205 } else {
@@ -3205,7 +3217,7 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
3205 return; 3217 return;
3206 3218
3207 DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n", 3219 DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
3208 type, mode_idx + 1, mode->clock, clock); 3220 type, vic, mode->clock, clock);
3209 mode->clock = clock; 3221 mode->clock = clock;
3210} 3222}
3211 3223
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index d18b88b755c3..e8629076de32 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(drm_i2c_encoder_destroy);
124 * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: 124 * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
125 */ 125 */
126 126
127static inline struct drm_encoder_slave_funcs * 127static inline const struct drm_encoder_slave_funcs *
128get_slave_funcs(struct drm_encoder *enc) 128get_slave_funcs(struct drm_encoder *enc)
129{ 129{
130 return to_encoder_slave(enc)->slave_funcs; 130 return to_encoder_slave(enc)->slave_funcs;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index b7d5b848d2f8..c895b6fddbd8 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -266,7 +266,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
266 fbi = drm_fb_helper_alloc_fbi(helper); 266 fbi = drm_fb_helper_alloc_fbi(helper);
267 if (IS_ERR(fbi)) { 267 if (IS_ERR(fbi)) {
268 ret = PTR_ERR(fbi); 268 ret = PTR_ERR(fbi);
269 goto err_drm_gem_cma_free_object; 269 goto err_gem_free_object;
270 } 270 }
271 271
272 fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1); 272 fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
@@ -299,8 +299,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
299 299
300err_fb_info_destroy: 300err_fb_info_destroy:
301 drm_fb_helper_release_fbi(helper); 301 drm_fb_helper_release_fbi(helper);
302err_drm_gem_cma_free_object: 302err_gem_free_object:
303 drm_gem_cma_free_object(&obj->base); 303 dev->driver->gem_free_object(&obj->base);
304 return ret; 304 return ret;
305} 305}
306 306
@@ -348,9 +348,6 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
348 348
349 } 349 }
350 350
351 /* disable all the possible outputs/crtcs before entering KMS mode */
352 drm_helper_disable_unused_functions(dev);
353
354 ret = drm_fb_helper_initial_config(helper, preferred_bpp); 351 ret = drm_fb_helper_initial_config(helper, preferred_bpp);
355 if (ret < 0) { 352 if (ret < 0) {
356 dev_err(dev->dev, "Failed to set initial hw configuration.\n"); 353 dev_err(dev->dev, "Failed to set initial hw configuration.\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 69cbab5e5c81..1e103c4c6ee0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1251,7 +1251,7 @@ retry:
1251 goto fail; 1251 goto fail;
1252 1252
1253 plane = mode_set->crtc->primary; 1253 plane = mode_set->crtc->primary;
1254 plane_mask |= drm_plane_index(plane); 1254 plane_mask |= (1 << drm_plane_index(plane));
1255 plane->old_fb = plane->fb; 1255 plane->old_fb = plane->fb;
1256 } 1256 }
1257 1257
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e10bba4468b..2e8c77e71e1f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -220,6 +220,9 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
220static void 220static void
221drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) 221drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
222{ 222{
223 struct drm_device *dev = obj->dev;
224 bool final = false;
225
223 if (WARN_ON(obj->handle_count == 0)) 226 if (WARN_ON(obj->handle_count == 0))
224 return; 227 return;
225 228
@@ -229,14 +232,39 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
229 * checked for a name 232 * checked for a name
230 */ 233 */
231 234
232 mutex_lock(&obj->dev->object_name_lock); 235 mutex_lock(&dev->object_name_lock);
233 if (--obj->handle_count == 0) { 236 if (--obj->handle_count == 0) {
234 drm_gem_object_handle_free(obj); 237 drm_gem_object_handle_free(obj);
235 drm_gem_object_exported_dma_buf_free(obj); 238 drm_gem_object_exported_dma_buf_free(obj);
239 final = true;
236 } 240 }
237 mutex_unlock(&obj->dev->object_name_lock); 241 mutex_unlock(&dev->object_name_lock);
238 242
239 drm_gem_object_unreference_unlocked(obj); 243 if (final)
244 drm_gem_object_unreference_unlocked(obj);
245}
246
247/*
248 * Called at device or object close to release the file's
249 * handle references on objects.
250 */
251static int
252drm_gem_object_release_handle(int id, void *ptr, void *data)
253{
254 struct drm_file *file_priv = data;
255 struct drm_gem_object *obj = ptr;
256 struct drm_device *dev = obj->dev;
257
258 if (drm_core_check_feature(dev, DRIVER_PRIME))
259 drm_gem_remove_prime_handles(obj, file_priv);
260 drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
261
262 if (dev->driver->gem_close_object)
263 dev->driver->gem_close_object(obj, file_priv);
264
265 drm_gem_object_handle_unreference_unlocked(obj);
266
267 return 0;
240} 268}
241 269
242/** 270/**
@@ -277,14 +305,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
277 idr_remove(&filp->object_idr, handle); 305 idr_remove(&filp->object_idr, handle);
278 spin_unlock(&filp->table_lock); 306 spin_unlock(&filp->table_lock);
279 307
280 if (drm_core_check_feature(dev, DRIVER_PRIME)) 308 drm_gem_object_release_handle(handle, obj, filp);
281 drm_gem_remove_prime_handles(obj, filp);
282 drm_vma_node_revoke(&obj->vma_node, filp->filp);
283
284 if (dev->driver->gem_close_object)
285 dev->driver->gem_close_object(obj, filp);
286 drm_gem_object_handle_unreference_unlocked(obj);
287
288 return 0; 309 return 0;
289} 310}
290EXPORT_SYMBOL(drm_gem_handle_delete); 311EXPORT_SYMBOL(drm_gem_handle_delete);
@@ -326,9 +347,12 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
326 u32 *handlep) 347 u32 *handlep)
327{ 348{
328 struct drm_device *dev = obj->dev; 349 struct drm_device *dev = obj->dev;
350 u32 handle;
329 int ret; 351 int ret;
330 352
331 WARN_ON(!mutex_is_locked(&dev->object_name_lock)); 353 WARN_ON(!mutex_is_locked(&dev->object_name_lock));
354 if (obj->handle_count++ == 0)
355 drm_gem_object_reference(obj);
332 356
333 /* 357 /*
334 * Get the user-visible handle using idr. Preload and perform 358 * Get the user-visible handle using idr. Preload and perform
@@ -338,32 +362,38 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
338 spin_lock(&file_priv->table_lock); 362 spin_lock(&file_priv->table_lock);
339 363
340 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); 364 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
341 drm_gem_object_reference(obj); 365
342 obj->handle_count++;
343 spin_unlock(&file_priv->table_lock); 366 spin_unlock(&file_priv->table_lock);
344 idr_preload_end(); 367 idr_preload_end();
368
345 mutex_unlock(&dev->object_name_lock); 369 mutex_unlock(&dev->object_name_lock);
346 if (ret < 0) { 370 if (ret < 0)
347 drm_gem_object_handle_unreference_unlocked(obj); 371 goto err_unref;
348 return ret; 372
349 } 373 handle = ret;
350 *handlep = ret;
351 374
352 ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp); 375 ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
353 if (ret) { 376 if (ret)
354 drm_gem_handle_delete(file_priv, *handlep); 377 goto err_remove;
355 return ret;
356 }
357 378
358 if (dev->driver->gem_open_object) { 379 if (dev->driver->gem_open_object) {
359 ret = dev->driver->gem_open_object(obj, file_priv); 380 ret = dev->driver->gem_open_object(obj, file_priv);
360 if (ret) { 381 if (ret)
361 drm_gem_handle_delete(file_priv, *handlep); 382 goto err_revoke;
362 return ret;
363 }
364 } 383 }
365 384
385 *handlep = handle;
366 return 0; 386 return 0;
387
388err_revoke:
389 drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
390err_remove:
391 spin_lock(&file_priv->table_lock);
392 idr_remove(&file_priv->object_idr, handle);
393 spin_unlock(&file_priv->table_lock);
394err_unref:
395 drm_gem_object_handle_unreference_unlocked(obj);
396 return ret;
367} 397}
368 398
369/** 399/**
@@ -630,7 +660,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
630 return -ENOENT; 660 return -ENOENT;
631 661
632 mutex_lock(&dev->object_name_lock); 662 mutex_lock(&dev->object_name_lock);
633 idr_preload(GFP_KERNEL);
634 /* prevent races with concurrent gem_close. */ 663 /* prevent races with concurrent gem_close. */
635 if (obj->handle_count == 0) { 664 if (obj->handle_count == 0) {
636 ret = -ENOENT; 665 ret = -ENOENT;
@@ -638,7 +667,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
638 } 667 }
639 668
640 if (!obj->name) { 669 if (!obj->name) {
641 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); 670 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
642 if (ret < 0) 671 if (ret < 0)
643 goto err; 672 goto err;
644 673
@@ -649,7 +678,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
649 ret = 0; 678 ret = 0;
650 679
651err: 680err:
652 idr_preload_end();
653 mutex_unlock(&dev->object_name_lock); 681 mutex_unlock(&dev->object_name_lock);
654 drm_gem_object_unreference_unlocked(obj); 682 drm_gem_object_unreference_unlocked(obj);
655 return ret; 683 return ret;
@@ -714,29 +742,6 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
714 spin_lock_init(&file_private->table_lock); 742 spin_lock_init(&file_private->table_lock);
715} 743}
716 744
717/*
718 * Called at device close to release the file's
719 * handle references on objects.
720 */
721static int
722drm_gem_object_release_handle(int id, void *ptr, void *data)
723{
724 struct drm_file *file_priv = data;
725 struct drm_gem_object *obj = ptr;
726 struct drm_device *dev = obj->dev;
727
728 if (drm_core_check_feature(dev, DRIVER_PRIME))
729 drm_gem_remove_prime_handles(obj, file_priv);
730 drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
731
732 if (dev->driver->gem_close_object)
733 dev->driver->gem_close_object(obj, file_priv);
734
735 drm_gem_object_handle_unreference_unlocked(obj);
736
737 return 0;
738}
739
740/** 745/**
741 * drm_gem_release - release file-private GEM resources 746 * drm_gem_release - release file-private GEM resources
742 * @dev: drm_device which is being closed by userspace 747 * @dev: drm_device which is being closed by userspace
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e109b49cd25d..e5df53b6e229 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -59,11 +59,13 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size)
59 struct drm_gem_object *gem_obj; 59 struct drm_gem_object *gem_obj;
60 int ret; 60 int ret;
61 61
62 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); 62 if (drm->driver->gem_create_object)
63 if (!cma_obj) 63 gem_obj = drm->driver->gem_create_object(drm, size);
64 else
65 gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
66 if (!gem_obj)
64 return ERR_PTR(-ENOMEM); 67 return ERR_PTR(-ENOMEM);
65 68 cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
66 gem_obj = &cma_obj->base;
67 69
68 ret = drm_gem_object_init(drm, gem_obj, size); 70 ret = drm_gem_object_init(drm, gem_obj, size);
69 if (ret) 71 if (ret)
@@ -119,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
119 return cma_obj; 121 return cma_obj;
120 122
121error: 123error:
122 drm_gem_cma_free_object(&cma_obj->base); 124 drm->driver->gem_free_object(&cma_obj->base);
123 return ERR_PTR(ret); 125 return ERR_PTR(ret);
124} 126}
125EXPORT_SYMBOL_GPL(drm_gem_cma_create); 127EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -169,7 +171,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
169 return cma_obj; 171 return cma_obj;
170 172
171err_handle_create: 173err_handle_create:
172 drm_gem_cma_free_object(gem_obj); 174 drm->driver->gem_free_object(gem_obj);
173 175
174 return ERR_PTR(ret); 176 return ERR_PTR(ret);
175} 177}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 607f493ae801..d12a4efa651b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -73,6 +73,9 @@ static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
73module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 73module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
74module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 74module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
76MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
77MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
78MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
76 79
77static void store_vblank(struct drm_device *dev, unsigned int pipe, 80static void store_vblank(struct drm_device *dev, unsigned int pipe,
78 u32 vblank_count_inc, 81 u32 vblank_count_inc,
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2d5ca8eec13a..6e6a9c58d404 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -365,6 +365,44 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
365} 365}
366EXPORT_SYMBOL(mipi_dsi_create_packet); 366EXPORT_SYMBOL(mipi_dsi_create_packet);
367 367
368/**
369 * mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command
370 * @dsi: DSI peripheral device
371 *
372 * Return: 0 on success or a negative error code on failure.
373 */
374int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi)
375{
376 struct mipi_dsi_msg msg = {
377 .channel = dsi->channel,
378 .type = MIPI_DSI_SHUTDOWN_PERIPHERAL,
379 .tx_buf = (u8 [2]) { 0, 0 },
380 .tx_len = 2,
381 };
382
383 return mipi_dsi_device_transfer(dsi, &msg);
384}
385EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
386
387/**
388 * mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
389 * @dsi: DSI peripheral device
390 *
391 * Return: 0 on success or a negative error code on failure.
392 */
393int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
394{
395 struct mipi_dsi_msg msg = {
396 .channel = dsi->channel,
397 .type = MIPI_DSI_TURN_ON_PERIPHERAL,
398 .tx_buf = (u8 [2]) { 0, 0 },
399 .tx_len = 2,
400 };
401
402 return mipi_dsi_device_transfer(dsi, &msg);
403}
404EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
405
368/* 406/*
369 * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the 407 * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the
370 * the payload in a long packet transmitted from the peripheral back to the 408 * the payload in a long packet transmitted from the peripheral back to the
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ef6bd3656548..20775c05235a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,10 +553,10 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
553 * drivers/video/fbmon.c 553 * drivers/video/fbmon.c
554 * 554 *
555 * Standard GTF parameters: 555 * Standard GTF parameters:
556 * M = 600 556 * M = 600
557 * C = 40 557 * C = 40
558 * K = 128 558 * K = 128
559 * J = 20 559 * J = 20
560 * 560 *
561 * Returns: 561 * Returns:
562 * The modeline based on the GTF algorithm stored in a drm_display_mode object. 562 * The modeline based on the GTF algorithm stored in a drm_display_mode object.
@@ -708,7 +708,8 @@ void drm_mode_set_name(struct drm_display_mode *mode)
708} 708}
709EXPORT_SYMBOL(drm_mode_set_name); 709EXPORT_SYMBOL(drm_mode_set_name);
710 710
711/** drm_mode_hsync - get the hsync of a mode 711/**
712 * drm_mode_hsync - get the hsync of a mode
712 * @mode: mode 713 * @mode: mode
713 * 714 *
714 * Returns: 715 * Returns:
@@ -1073,7 +1074,7 @@ static const char * const drm_mode_status_names[] = {
1073 MODE_STATUS(ONE_SIZE), 1074 MODE_STATUS(ONE_SIZE),
1074 MODE_STATUS(NO_REDUCED), 1075 MODE_STATUS(NO_REDUCED),
1075 MODE_STATUS(NO_STEREO), 1076 MODE_STATUS(NO_STEREO),
1076 MODE_STATUS(UNVERIFIED), 1077 MODE_STATUS(STALE),
1077 MODE_STATUS(BAD), 1078 MODE_STATUS(BAD),
1078 MODE_STATUS(ERROR), 1079 MODE_STATUS(ERROR),
1079}; 1080};
@@ -1171,7 +1172,6 @@ EXPORT_SYMBOL(drm_mode_sort);
1171/** 1172/**
1172 * drm_mode_connector_list_update - update the mode list for the connector 1173 * drm_mode_connector_list_update - update the mode list for the connector
1173 * @connector: the connector to update 1174 * @connector: the connector to update
1174 * @merge_type_bits: whether to merge or overwrite type bits
1175 * 1175 *
1176 * This moves the modes from the @connector probed_modes list 1176 * This moves the modes from the @connector probed_modes list
1177 * to the actual mode list. It compares the probed mode against the current 1177 * to the actual mode list. It compares the probed mode against the current
@@ -1180,33 +1180,48 @@ EXPORT_SYMBOL(drm_mode_sort);
1180 * This is just a helper functions doesn't validate any modes itself and also 1180 * This is just a helper functions doesn't validate any modes itself and also
1181 * doesn't prune any invalid modes. Callers need to do that themselves. 1181 * doesn't prune any invalid modes. Callers need to do that themselves.
1182 */ 1182 */
1183void drm_mode_connector_list_update(struct drm_connector *connector, 1183void drm_mode_connector_list_update(struct drm_connector *connector)
1184 bool merge_type_bits)
1185{ 1184{
1186 struct drm_display_mode *mode;
1187 struct drm_display_mode *pmode, *pt; 1185 struct drm_display_mode *pmode, *pt;
1188 int found_it;
1189 1186
1190 WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex)); 1187 WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
1191 1188
1192 list_for_each_entry_safe(pmode, pt, &connector->probed_modes, 1189 list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) {
1193 head) { 1190 struct drm_display_mode *mode;
1194 found_it = 0; 1191 bool found_it = false;
1192
1195 /* go through current modes checking for the new probed mode */ 1193 /* go through current modes checking for the new probed mode */
1196 list_for_each_entry(mode, &connector->modes, head) { 1194 list_for_each_entry(mode, &connector->modes, head) {
1197 if (drm_mode_equal(pmode, mode)) { 1195 if (!drm_mode_equal(pmode, mode))
1198 found_it = 1; 1196 continue;
1199 /* if equal delete the probed mode */ 1197
1200 mode->status = pmode->status; 1198 found_it = true;
1201 /* Merge type bits together */ 1199
1202 if (merge_type_bits) 1200 /*
1203 mode->type |= pmode->type; 1201 * If the old matching mode is stale (ie. left over
1204 else 1202 * from a previous probe) just replace it outright.
1205 mode->type = pmode->type; 1203 * Otherwise just merge the type bits between all
1206 list_del(&pmode->head); 1204 * equal probed modes.
1207 drm_mode_destroy(connector->dev, pmode); 1205 *
1208 break; 1206 * If two probed modes are considered equal, pick the
1207 * actual timings from the one that's marked as
1208 * preferred (in case the match isn't 100%). If
1209 * multiple or zero preferred modes are present, favor
1210 * the mode added to the probed_modes list first.
1211 */
1212 if (mode->status == MODE_STALE) {
1213 drm_mode_copy(mode, pmode);
1214 } else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 &&
1215 (pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) {
1216 pmode->type |= mode->type;
1217 drm_mode_copy(mode, pmode);
1218 } else {
1219 mode->type |= pmode->type;
1209 } 1220 }
1221
1222 list_del(&pmode->head);
1223 drm_mode_destroy(connector->dev, pmode);
1224 break;
1210 } 1225 }
1211 1226
1212 if (!found_it) { 1227 if (!found_it) {
@@ -1229,7 +1244,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update);
1229 * This uses the same parameters as the fb modedb.c, except for an extra 1244 * This uses the same parameters as the fb modedb.c, except for an extra
1230 * force-enable, force-enable-digital and force-disable bit at the end: 1245 * force-enable, force-enable-digital and force-disable bit at the end:
1231 * 1246 *
1232 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] 1247 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
1233 * 1248 *
1234 * The intermediate drm_cmdline_mode structure is required to store additional 1249 * The intermediate drm_cmdline_mode structure is required to store additional
1235 * options from the command line modline like the force-enable/disable flag. 1250 * options from the command line modline like the force-enable/disable flag.
@@ -1247,7 +1262,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1247 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0; 1262 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
1248 bool yres_specified = false, cvt = false, rb = false; 1263 bool yres_specified = false, cvt = false, rb = false;
1249 bool interlace = false, margins = false, was_digit = false; 1264 bool interlace = false, margins = false, was_digit = false;
1250 int i, err; 1265 int i;
1251 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; 1266 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
1252 1267
1253#ifdef CONFIG_FB 1268#ifdef CONFIG_FB
@@ -1267,9 +1282,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1267 case '@': 1282 case '@':
1268 if (!refresh_specified && !bpp_specified && 1283 if (!refresh_specified && !bpp_specified &&
1269 !yres_specified && !cvt && !rb && was_digit) { 1284 !yres_specified && !cvt && !rb && was_digit) {
1270 err = kstrtouint(&name[i + 1], 10, &refresh); 1285 refresh = simple_strtol(&name[i+1], NULL, 10);
1271 if (err)
1272 return false;
1273 refresh_specified = true; 1286 refresh_specified = true;
1274 was_digit = false; 1287 was_digit = false;
1275 } else 1288 } else
@@ -1278,9 +1291,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1278 case '-': 1291 case '-':
1279 if (!bpp_specified && !yres_specified && !cvt && 1292 if (!bpp_specified && !yres_specified && !cvt &&
1280 !rb && was_digit) { 1293 !rb && was_digit) {
1281 err = kstrtouint(&name[i + 1], 10, &bpp); 1294 bpp = simple_strtol(&name[i+1], NULL, 10);
1282 if (err)
1283 return false;
1284 bpp_specified = true; 1295 bpp_specified = true;
1285 was_digit = false; 1296 was_digit = false;
1286 } else 1297 } else
@@ -1288,9 +1299,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1288 break; 1299 break;
1289 case 'x': 1300 case 'x':
1290 if (!yres_specified && was_digit) { 1301 if (!yres_specified && was_digit) {
1291 err = kstrtouint(&name[i + 1], 10, &yres); 1302 yres = simple_strtol(&name[i+1], NULL, 10);
1292 if (err)
1293 return false;
1294 yres_specified = true; 1303 yres_specified = true;
1295 was_digit = false; 1304 was_digit = false;
1296 } else 1305 } else
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c2f5971146ba..e3a4adf03e7b 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -40,17 +40,15 @@
40 * The basic usage pattern is to: 40 * The basic usage pattern is to:
41 * 41 *
42 * drm_modeset_acquire_init(&ctx) 42 * drm_modeset_acquire_init(&ctx)
43 * retry: 43 * retry:
44 * foreach (lock in random_ordered_set_of_locks) { 44 * foreach (lock in random_ordered_set_of_locks) {
45 * ret = drm_modeset_lock(lock, &ctx) 45 * ret = drm_modeset_lock(lock, &ctx)
46 * if (ret == -EDEADLK) { 46 * if (ret == -EDEADLK) {
47 * drm_modeset_backoff(&ctx); 47 * drm_modeset_backoff(&ctx);
48 * goto retry; 48 * goto retry;
49 * } 49 * }
50 * } 50 * }
51 *
52 * ... do stuff ... 51 * ... do stuff ...
53 *
54 * drm_modeset_drop_locks(&ctx); 52 * drm_modeset_drop_locks(&ctx);
55 * drm_modeset_acquire_fini(&ctx); 53 * drm_modeset_acquire_fini(&ctx);
56 */ 54 */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index fcd2a86acd2c..a1fff1179a97 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -410,6 +410,26 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
410} 410}
411EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask); 411EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
412 412
413int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
414{
415 struct pci_dev *root;
416 u32 lnkcap;
417
418 *mlw = 0;
419 if (!dev->pdev)
420 return -EINVAL;
421
422 root = dev->pdev->bus->self;
423
424 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
425
426 *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
427
428 DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
429 return 0;
430}
431EXPORT_SYMBOL(drm_pcie_get_max_link_width);
432
413#else 433#else
414 434
415int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) 435int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index a6983d41920d..369d2898ff9e 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -57,6 +57,10 @@
57 * by the atomic helpers. 57 * by the atomic helpers.
58 * 58 *
59 * Again drivers are strongly urged to switch to the new interfaces. 59 * Again drivers are strongly urged to switch to the new interfaces.
60 *
61 * The plane helpers share the function table structures with other helpers,
62 * specifically also the atomic helpers. See struct &drm_plane_helper_funcs for
63 * the details.
60 */ 64 */
61 65
62/* 66/*
@@ -371,7 +375,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
371 &drm_primary_helper_funcs, 375 &drm_primary_helper_funcs,
372 safe_modeset_formats, 376 safe_modeset_formats,
373 ARRAY_SIZE(safe_modeset_formats), 377 ARRAY_SIZE(safe_modeset_formats),
374 DRM_PLANE_TYPE_PRIMARY); 378 DRM_PLANE_TYPE_PRIMARY, NULL);
375 if (ret) { 379 if (ret) {
376 kfree(primary); 380 kfree(primary);
377 primary = NULL; 381 primary = NULL;
@@ -398,7 +402,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
398 struct drm_plane *primary; 402 struct drm_plane *primary;
399 403
400 primary = create_primary_plane(dev); 404 primary = create_primary_plane(dev);
401 return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs); 405 return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
406 NULL);
402} 407}
403EXPORT_SYMBOL(drm_crtc_init); 408EXPORT_SYMBOL(drm_crtc_init);
404 409
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9f935f55d74c..27aa7183b20b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -313,19 +313,15 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
313 * 313 *
314 * Export callbacks: 314 * Export callbacks:
315 * 315 *
316 * - @gem_prime_pin (optional): prepare a GEM object for exporting 316 * * @gem_prime_pin (optional): prepare a GEM object for exporting
317 * 317 * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
318 * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages 318 * * @gem_prime_vmap: vmap a buffer exported by your driver
319 * 319 * * @gem_prime_vunmap: vunmap a buffer exported by your driver
320 * - @gem_prime_vmap: vmap a buffer exported by your driver 320 * * @gem_prime_mmap (optional): mmap a buffer exported by your driver
321 *
322 * - @gem_prime_vunmap: vunmap a buffer exported by your driver
323 *
324 * - @gem_prime_mmap (optional): mmap a buffer exported by your driver
325 * 321 *
326 * Import callback: 322 * Import callback:
327 * 323 *
328 * - @gem_prime_import_sg_table (import): produce a GEM object from another 324 * * @gem_prime_import_sg_table (import): produce a GEM object from another
329 * driver's scatter/gather table 325 * driver's scatter/gather table
330 */ 326 */
331 327
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index eee3b6f38cfb..e714b5a7955f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -53,6 +53,9 @@
53 * This helper library can be used independently of the modeset helper library. 53 * This helper library can be used independently of the modeset helper library.
54 * Drivers can also overwrite different parts e.g. use their own hotplug 54 * Drivers can also overwrite different parts e.g. use their own hotplug
55 * handling code to avoid probing unrelated outputs. 55 * handling code to avoid probing unrelated outputs.
56 *
57 * The probe helpers share the function table structures with other display
58 * helper libraries. See struct &drm_connector_helper_funcs for the details.
56 */ 59 */
57 60
58static bool drm_kms_helper_poll = true; 61static bool drm_kms_helper_poll = true;
@@ -126,9 +129,64 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
126} 129}
127EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); 130EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
128 131
129 132/**
130static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, 133 * drm_helper_probe_single_connector_modes - get complete set of display modes
131 uint32_t maxX, uint32_t maxY, bool merge_type_bits) 134 * @connector: connector to probe
135 * @maxX: max width for modes
136 * @maxY: max height for modes
137 *
138 * Based on the helper callbacks implemented by @connector in struct
139 * &drm_connector_helper_funcs try to detect all valid modes. Modes will first
140 * be added to the connector's probed_modes list, then culled (based on validity
141 * and the @maxX, @maxY parameters) and put into the normal modes list.
142 *
143 * Intended to be used as a generic implementation of the ->fill_modes()
144 * @connector vfunc for drivers that use the CRTC helpers for output mode
145 * filtering and detection.
146 *
147 * The basic procedure is as follows
148 *
149 * 1. All modes currently on the connector's modes list are marked as stale
150 *
151 * 2. New modes are added to the connector's probed_modes list with
152 * drm_mode_probed_add(). New modes start their life with status as OK.
153 * Modes are added from a single source using the following priority order.
154 *
155 * - debugfs 'override_edid' (used for testing only)
156 * - firmware EDID (drm_load_edid_firmware())
157 * - connector helper ->get_modes() vfunc
158 * - if the connector status is connector_status_connected, standard
159 * VESA DMT modes up to 1024x768 are automatically added
160 * (drm_add_modes_noedid())
161 *
162 * Finally modes specified via the kernel command line (video=...) are
163 * added in addition to what the earlier probes produced
164 * (drm_helper_probe_add_cmdline_mode()). These modes are generated
165 * using the VESA GTF/CVT formulas.
166 *
167 * 3. Modes are moved from the probed_modes list to the modes list. Potential
168 * duplicates are merged together (see drm_mode_connector_list_update()).
169 * After this step the probed_modes list will be empty again.
170 *
171 * 4. Any non-stale mode on the modes list then undergoes validation
172 *
173 * - drm_mode_validate_basic() performs basic sanity checks
174 * - drm_mode_validate_size() filters out modes larger than @maxX and @maxY
175 * (if specified)
176 * - drm_mode_validate_flag() checks the modes againt basic connector
177 * capabilites (interlace_allowed,doublescan_allowed,stereo_allowed)
178 * - the optional connector ->mode_valid() helper can perform driver and/or
179 * hardware specific checks
180 *
181 * 5. Any mode whose status is not OK is pruned from the connector's modes list,
182 * accompanied by a debug message indicating the reason for the mode's
183 * rejection (see drm_mode_prune_invalid()).
184 *
185 * Returns:
186 * The number of modes found on @connector.
187 */
188int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
189 uint32_t maxX, uint32_t maxY)
132{ 190{
133 struct drm_device *dev = connector->dev; 191 struct drm_device *dev = connector->dev;
134 struct drm_display_mode *mode; 192 struct drm_display_mode *mode;
@@ -143,9 +201,9 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
143 201
144 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 202 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
145 connector->name); 203 connector->name);
146 /* set all modes to the unverified state */ 204 /* set all old modes to the stale state */
147 list_for_each_entry(mode, &connector->modes, head) 205 list_for_each_entry(mode, &connector->modes, head)
148 mode->status = MODE_UNVERIFIED; 206 mode->status = MODE_STALE;
149 207
150 old_status = connector->status; 208 old_status = connector->status;
151 209
@@ -200,17 +258,16 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
200 goto prune; 258 goto prune;
201 } 259 }
202 260
261 if (connector->override_edid) {
262 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
263
264 count = drm_add_edid_modes(connector, edid);
265 drm_edid_to_eld(connector, edid);
266 } else {
203#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE 267#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
204 count = drm_load_edid_firmware(connector); 268 count = drm_load_edid_firmware(connector);
205 if (count == 0) 269 if (count == 0)
206#endif 270#endif
207 {
208 if (connector->override_edid) {
209 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
210
211 count = drm_add_edid_modes(connector, edid);
212 drm_edid_to_eld(connector, edid);
213 } else
214 count = (*connector_funcs->get_modes)(connector); 271 count = (*connector_funcs->get_modes)(connector);
215 } 272 }
216 273
@@ -220,7 +277,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
220 if (count == 0) 277 if (count == 0)
221 goto prune; 278 goto prune;
222 279
223 drm_mode_connector_list_update(connector, merge_type_bits); 280 drm_mode_connector_list_update(connector);
224 281
225 if (connector->interlace_allowed) 282 if (connector->interlace_allowed)
226 mode_flags |= DRM_MODE_FLAG_INTERLACE; 283 mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -230,7 +287,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
230 mode_flags |= DRM_MODE_FLAG_3D_MASK; 287 mode_flags |= DRM_MODE_FLAG_3D_MASK;
231 288
232 list_for_each_entry(mode, &connector->modes, head) { 289 list_for_each_entry(mode, &connector->modes, head) {
233 mode->status = drm_mode_validate_basic(mode); 290 if (mode->status == MODE_OK)
291 mode->status = drm_mode_validate_basic(mode);
234 292
235 if (mode->status == MODE_OK) 293 if (mode->status == MODE_OK)
236 mode->status = drm_mode_validate_size(mode, maxX, maxY); 294 mode->status = drm_mode_validate_size(mode, maxX, maxY);
@@ -263,49 +321,9 @@ prune:
263 321
264 return count; 322 return count;
265} 323}
266
267/**
268 * drm_helper_probe_single_connector_modes - get complete set of display modes
269 * @connector: connector to probe
270 * @maxX: max width for modes
271 * @maxY: max height for modes
272 *
273 * Based on the helper callbacks implemented by @connector try to detect all
274 * valid modes. Modes will first be added to the connector's probed_modes list,
275 * then culled (based on validity and the @maxX, @maxY parameters) and put into
276 * the normal modes list.
277 *
278 * Intended to be use as a generic implementation of the ->fill_modes()
279 * @connector vfunc for drivers that use the crtc helpers for output mode
280 * filtering and detection.
281 *
282 * Returns:
283 * The number of modes found on @connector.
284 */
285int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
286 uint32_t maxX, uint32_t maxY)
287{
288 return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
289}
290EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); 324EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
291 325
292/** 326/**
293 * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
294 * @connector: connector to probe
295 * @maxX: max width for modes
296 * @maxY: max height for modes
297 *
298 * This operates like drm_hehlper_probe_single_connector_modes except it
299 * replaces the mode bits instead of merging them for preferred modes.
300 */
301int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
302 uint32_t maxX, uint32_t maxY)
303{
304 return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
305}
306EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
307
308/**
309 * drm_kms_helper_hotplug_event - fire off KMS hotplug events 327 * drm_kms_helper_hotplug_event - fire off KMS hotplug events
310 * @dev: drm_device whose connector state changed 328 * @dev: drm_device whose connector state changed
311 * 329 *
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 0ca64106a97b..d503f8e8c2d1 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -240,7 +240,7 @@ static ssize_t edid_show(struct file *filp, struct kobject *kobj,
240 struct bin_attribute *attr, char *buf, loff_t off, 240 struct bin_attribute *attr, char *buf, loff_t off,
241 size_t count) 241 size_t count)
242{ 242{
243 struct device *connector_dev = container_of(kobj, struct device, kobj); 243 struct device *connector_dev = kobj_to_dev(kobj);
244 struct drm_connector *connector = to_drm_connector(connector_dev); 244 struct drm_connector *connector = to_drm_connector(connector_dev);
245 unsigned char *edid; 245 unsigned char *edid;
246 size_t size; 246 size_t size;
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
new file mode 100644
index 000000000000..2cde7a5442fb
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -0,0 +1,20 @@
1
2config DRM_ETNAVIV
3 tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
4 depends on DRM
5 depends on ARCH_MXC || ARCH_DOVE
6 select SHMEM
7 select TMPFS
8 select IOMMU_API
9 select IOMMU_SUPPORT
10 select WANT_DEV_COREDUMP
11 help
12 DRM driver for Vivante GPUs.
13
14config DRM_ETNAVIV_REGISTER_LOGGING
15 bool "enable ETNAVIV register logging"
16 depends on DRM_ETNAVIV
17 help
18 Compile in support for logging register reads/writes in a format
19 that can be parsed by envytools demsm tool. If enabled, register
20 logging can be switched on via etnaviv.reglog=y module param.
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
new file mode 100644
index 000000000000..1086e9876f91
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -0,0 +1,14 @@
1etnaviv-y := \
2 etnaviv_buffer.o \
3 etnaviv_cmd_parser.o \
4 etnaviv_drv.o \
5 etnaviv_dump.o \
6 etnaviv_gem_prime.o \
7 etnaviv_gem_submit.o \
8 etnaviv_gem.o \
9 etnaviv_gpu.o \
10 etnaviv_iommu_v2.o \
11 etnaviv_iommu.o \
12 etnaviv_mmu.o
13
14obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
new file mode 100644
index 000000000000..8c44ba9a694e
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
@@ -0,0 +1,218 @@
1#ifndef CMDSTREAM_XML
2#define CMDSTREAM_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56)
12- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
13
14Copyright (C) 2014
15*/
16
17
18#define FE_OPCODE_LOAD_STATE 0x00000001
19#define FE_OPCODE_END 0x00000002
20#define FE_OPCODE_NOP 0x00000003
21#define FE_OPCODE_DRAW_2D 0x00000004
22#define FE_OPCODE_DRAW_PRIMITIVES 0x00000005
23#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006
24#define FE_OPCODE_WAIT 0x00000007
25#define FE_OPCODE_LINK 0x00000008
26#define FE_OPCODE_STALL 0x00000009
27#define FE_OPCODE_CALL 0x0000000a
28#define FE_OPCODE_RETURN 0x0000000b
29#define FE_OPCODE_CHIP_SELECT 0x0000000d
30#define PRIMITIVE_TYPE_POINTS 0x00000001
31#define PRIMITIVE_TYPE_LINES 0x00000002
32#define PRIMITIVE_TYPE_LINE_STRIP 0x00000003
33#define PRIMITIVE_TYPE_TRIANGLES 0x00000004
34#define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005
35#define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006
36#define PRIMITIVE_TYPE_LINE_LOOP 0x00000007
37#define PRIMITIVE_TYPE_QUADS 0x00000008
38#define VIV_FE_LOAD_STATE 0x00000000
39
40#define VIV_FE_LOAD_STATE_HEADER 0x00000000
41#define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000
42#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27
43#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000
44#define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000
45#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000
46#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16
47#define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK)
48#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff
49#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0
50#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK)
51#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2
52
53#define VIV_FE_END 0x00000000
54
55#define VIV_FE_END_HEADER 0x00000000
56#define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f
57#define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0
58#define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK)
59#define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100
60#define VIV_FE_END_HEADER_OP__MASK 0xf8000000
61#define VIV_FE_END_HEADER_OP__SHIFT 27
62#define VIV_FE_END_HEADER_OP_END 0x10000000
63
64#define VIV_FE_NOP 0x00000000
65
66#define VIV_FE_NOP_HEADER 0x00000000
67#define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000
68#define VIV_FE_NOP_HEADER_OP__SHIFT 27
69#define VIV_FE_NOP_HEADER_OP_NOP 0x18000000
70
71#define VIV_FE_DRAW_2D 0x00000000
72
73#define VIV_FE_DRAW_2D_HEADER 0x00000000
74#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00
75#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8
76#define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK)
77#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000
78#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16
79#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK)
80#define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000
81#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27
82#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000
83
84#define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008
85#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff
86#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0
87#define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK)
88#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000
89#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16
90#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK)
91
92#define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c
93#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff
94#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0
95#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK)
96#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000
97#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16
98#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK)
99
100#define VIV_FE_DRAW_PRIMITIVES 0x00000000
101
102#define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000
103#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000
104#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27
105#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000
106
107#define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004
108#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
109#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0
110#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK)
111
112#define VIV_FE_DRAW_PRIMITIVES_START 0x00000008
113
114#define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c
115
116#define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000
117
118#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000
119#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000
120#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27
121#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000
122
123#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004
124#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
125#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0
126#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK)
127
128#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008
129
130#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c
131
132#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010
133
134#define VIV_FE_WAIT 0x00000000
135
136#define VIV_FE_WAIT_HEADER 0x00000000
137#define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff
138#define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0
139#define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK)
140#define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000
141#define VIV_FE_WAIT_HEADER_OP__SHIFT 27
142#define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000
143
144#define VIV_FE_LINK 0x00000000
145
146#define VIV_FE_LINK_HEADER 0x00000000
147#define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff
148#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0
149#define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK)
150#define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000
151#define VIV_FE_LINK_HEADER_OP__SHIFT 27
152#define VIV_FE_LINK_HEADER_OP_LINK 0x40000000
153
154#define VIV_FE_LINK_ADDRESS 0x00000004
155
156#define VIV_FE_STALL 0x00000000
157
158#define VIV_FE_STALL_HEADER 0x00000000
159#define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000
160#define VIV_FE_STALL_HEADER_OP__SHIFT 27
161#define VIV_FE_STALL_HEADER_OP_STALL 0x48000000
162
163#define VIV_FE_STALL_TOKEN 0x00000004
164#define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f
165#define VIV_FE_STALL_TOKEN_FROM__SHIFT 0
166#define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK)
167#define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00
168#define VIV_FE_STALL_TOKEN_TO__SHIFT 8
169#define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK)
170
171#define VIV_FE_CALL 0x00000000
172
173#define VIV_FE_CALL_HEADER 0x00000000
174#define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff
175#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0
176#define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK)
177#define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000
178#define VIV_FE_CALL_HEADER_OP__SHIFT 27
179#define VIV_FE_CALL_HEADER_OP_CALL 0x50000000
180
181#define VIV_FE_CALL_ADDRESS 0x00000004
182
183#define VIV_FE_CALL_RETURN_PREFETCH 0x00000008
184
185#define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c
186
187#define VIV_FE_RETURN 0x00000000
188
189#define VIV_FE_RETURN_HEADER 0x00000000
190#define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000
191#define VIV_FE_RETURN_HEADER_OP__SHIFT 27
192#define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000
193
194#define VIV_FE_CHIP_SELECT 0x00000000
195
196#define VIV_FE_CHIP_SELECT_HEADER 0x00000000
197#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000
198#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27
199#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000
200#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000
201#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000
202#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000
203#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000
204#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800
205#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400
206#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200
207#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100
208#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080
209#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040
210#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020
211#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010
212#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008
213#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004
214#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002
215#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001
216
217
218#endif /* CMDSTREAM_XML */
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
new file mode 100644
index 000000000000..9e585d51fb78
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -0,0 +1,249 @@
1#ifndef COMMON_XML
2#define COMMON_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
12- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
13
14Copyright (C) 2015
15*/
16
17
18#define PIPE_ID_PIPE_3D 0x00000000
19#define PIPE_ID_PIPE_2D 0x00000001
20#define SYNC_RECIPIENT_FE 0x00000001
21#define SYNC_RECIPIENT_RA 0x00000005
22#define SYNC_RECIPIENT_PE 0x00000007
23#define SYNC_RECIPIENT_DE 0x0000000b
24#define SYNC_RECIPIENT_VG 0x0000000f
25#define SYNC_RECIPIENT_TESSELATOR 0x00000010
26#define SYNC_RECIPIENT_VG2 0x00000011
27#define SYNC_RECIPIENT_TESSELATOR2 0x00000012
28#define SYNC_RECIPIENT_VG3 0x00000013
29#define SYNC_RECIPIENT_TESSELATOR3 0x00000014
30#define ENDIAN_MODE_NO_SWAP 0x00000000
31#define ENDIAN_MODE_SWAP_16 0x00000001
32#define ENDIAN_MODE_SWAP_32 0x00000002
33#define chipModel_GC300 0x00000300
34#define chipModel_GC320 0x00000320
35#define chipModel_GC350 0x00000350
36#define chipModel_GC355 0x00000355
37#define chipModel_GC400 0x00000400
38#define chipModel_GC410 0x00000410
39#define chipModel_GC420 0x00000420
40#define chipModel_GC450 0x00000450
41#define chipModel_GC500 0x00000500
42#define chipModel_GC530 0x00000530
43#define chipModel_GC600 0x00000600
44#define chipModel_GC700 0x00000700
45#define chipModel_GC800 0x00000800
46#define chipModel_GC860 0x00000860
47#define chipModel_GC880 0x00000880
48#define chipModel_GC1000 0x00001000
49#define chipModel_GC2000 0x00002000
50#define chipModel_GC2100 0x00002100
51#define chipModel_GC4000 0x00004000
52#define RGBA_BITS_R 0x00000001
53#define RGBA_BITS_G 0x00000002
54#define RGBA_BITS_B 0x00000004
55#define RGBA_BITS_A 0x00000008
56#define chipFeatures_FAST_CLEAR 0x00000001
57#define chipFeatures_SPECIAL_ANTI_ALIASING 0x00000002
58#define chipFeatures_PIPE_3D 0x00000004
59#define chipFeatures_DXT_TEXTURE_COMPRESSION 0x00000008
60#define chipFeatures_DEBUG_MODE 0x00000010
61#define chipFeatures_Z_COMPRESSION 0x00000020
62#define chipFeatures_YUV420_SCALER 0x00000040
63#define chipFeatures_MSAA 0x00000080
64#define chipFeatures_DC 0x00000100
65#define chipFeatures_PIPE_2D 0x00000200
66#define chipFeatures_ETC1_TEXTURE_COMPRESSION 0x00000400
67#define chipFeatures_FAST_SCALER 0x00000800
68#define chipFeatures_HIGH_DYNAMIC_RANGE 0x00001000
69#define chipFeatures_YUV420_TILER 0x00002000
70#define chipFeatures_MODULE_CG 0x00004000
71#define chipFeatures_MIN_AREA 0x00008000
72#define chipFeatures_NO_EARLY_Z 0x00010000
73#define chipFeatures_NO_422_TEXTURE 0x00020000
74#define chipFeatures_BUFFER_INTERLEAVING 0x00040000
75#define chipFeatures_BYTE_WRITE_2D 0x00080000
76#define chipFeatures_NO_SCALER 0x00100000
77#define chipFeatures_YUY2_AVERAGING 0x00200000
78#define chipFeatures_HALF_PE_CACHE 0x00400000
79#define chipFeatures_HALF_TX_CACHE 0x00800000
80#define chipFeatures_YUY2_RENDER_TARGET 0x01000000
81#define chipFeatures_MEM32 0x02000000
82#define chipFeatures_PIPE_VG 0x04000000
83#define chipFeatures_VGTS 0x08000000
84#define chipFeatures_FE20 0x10000000
85#define chipFeatures_BYTE_WRITE_3D 0x20000000
86#define chipFeatures_RS_YUV_TARGET 0x40000000
87#define chipFeatures_32_BIT_INDICES 0x80000000
88#define chipMinorFeatures0_FLIP_Y 0x00000001
89#define chipMinorFeatures0_DUAL_RETURN_BUS 0x00000002
90#define chipMinorFeatures0_ENDIANNESS_CONFIG 0x00000004
91#define chipMinorFeatures0_TEXTURE_8K 0x00000008
92#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER 0x00000010
93#define chipMinorFeatures0_SPECIAL_MSAA_LOD 0x00000020
94#define chipMinorFeatures0_FAST_CLEAR_FLUSH 0x00000040
95#define chipMinorFeatures0_2DPE20 0x00000080
96#define chipMinorFeatures0_CORRECT_AUTO_DISABLE 0x00000100
97#define chipMinorFeatures0_RENDERTARGET_8K 0x00000200
98#define chipMinorFeatures0_2BITPERTILE 0x00000400
99#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED 0x00000800
100#define chipMinorFeatures0_SUPER_TILED 0x00001000
101#define chipMinorFeatures0_VG_20 0x00002000
102#define chipMinorFeatures0_TS_EXTENDED_COMMANDS 0x00004000
103#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED 0x00008000
104#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL 0x00010000
105#define chipMinorFeatures0_VG_FILTER 0x00020000
106#define chipMinorFeatures0_VG_21 0x00040000
107#define chipMinorFeatures0_SHADER_HAS_W 0x00080000
108#define chipMinorFeatures0_HAS_SQRT_TRIG 0x00100000
109#define chipMinorFeatures0_MORE_MINOR_FEATURES 0x00200000
110#define chipMinorFeatures0_MC20 0x00400000
111#define chipMinorFeatures0_MSAA_SIDEBAND 0x00800000
112#define chipMinorFeatures0_BUG_FIXES0 0x01000000
113#define chipMinorFeatures0_VAA 0x02000000
114#define chipMinorFeatures0_BYPASS_IN_MSAA 0x04000000
115#define chipMinorFeatures0_HZ 0x08000000
116#define chipMinorFeatures0_NEW_TEXTURE 0x10000000
117#define chipMinorFeatures0_2D_A8_TARGET 0x20000000
118#define chipMinorFeatures0_CORRECT_STENCIL 0x40000000
119#define chipMinorFeatures0_ENHANCE_VR 0x80000000
120#define chipMinorFeatures1_RSUV_SWIZZLE 0x00000001
121#define chipMinorFeatures1_V2_COMPRESSION 0x00000002
122#define chipMinorFeatures1_VG_DOUBLE_BUFFER 0x00000004
123#define chipMinorFeatures1_EXTRA_EVENT_STATES 0x00000008
124#define chipMinorFeatures1_NO_STRIPING_NEEDED 0x00000010
125#define chipMinorFeatures1_TEXTURE_STRIDE 0x00000020
126#define chipMinorFeatures1_BUG_FIXES3 0x00000040
127#define chipMinorFeatures1_AUTO_DISABLE 0x00000080
128#define chipMinorFeatures1_AUTO_RESTART_TS 0x00000100
129#define chipMinorFeatures1_DISABLE_PE_GATING 0x00000200
130#define chipMinorFeatures1_L2_WINDOWING 0x00000400
131#define chipMinorFeatures1_HALF_FLOAT 0x00000800
132#define chipMinorFeatures1_PIXEL_DITHER 0x00001000
133#define chipMinorFeatures1_TWO_STENCIL_REFERENCE 0x00002000
134#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT 0x00004000
135#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH 0x00008000
136#define chipMinorFeatures1_2D_DITHER 0x00010000
137#define chipMinorFeatures1_BUG_FIXES5 0x00020000
138#define chipMinorFeatures1_NEW_2D 0x00040000
139#define chipMinorFeatures1_NEW_FP 0x00080000
140#define chipMinorFeatures1_TEXTURE_HALIGN 0x00100000
141#define chipMinorFeatures1_NON_POWER_OF_TWO 0x00200000
142#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT 0x00400000
143#define chipMinorFeatures1_HALTI0 0x00800000
144#define chipMinorFeatures1_CORRECT_OVERFLOW_VG 0x01000000
145#define chipMinorFeatures1_NEGATIVE_LOG_FIX 0x02000000
146#define chipMinorFeatures1_RESOLVE_OFFSET 0x04000000
147#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK 0x08000000
148#define chipMinorFeatures1_MMU_VERSION 0x10000000
149#define chipMinorFeatures1_WIDE_LINE 0x20000000
150#define chipMinorFeatures1_BUG_FIXES6 0x40000000
151#define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000
152#define chipMinorFeatures2_LINE_LOOP 0x00000001
153#define chipMinorFeatures2_LOGIC_OP 0x00000002
154#define chipMinorFeatures2_UNK2 0x00000004
155#define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008
156#define chipMinorFeatures2_UNK4 0x00000010
157#define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020
158#define chipMinorFeatures2_COMPOSITION 0x00000040
159#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080
160#define chipMinorFeatures2_UNK8 0x00000100
161#define chipMinorFeatures2_UNK9 0x00000200
162#define chipMinorFeatures2_UNK10 0x00000400
163#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800
164#define chipMinorFeatures2_UNK12 0x00001000
165#define chipMinorFeatures2_UNK13 0x00002000
166#define chipMinorFeatures2_UNK14 0x00004000
167#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000
168#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000
169#define chipMinorFeatures2_2D_TILING 0x00020000
170#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000
171#define chipMinorFeatures2_TILE_FILLER 0x00080000
172#define chipMinorFeatures2_UNK20 0x00100000
173#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000
174#define chipMinorFeatures2_UNK22 0x00400000
175#define chipMinorFeatures2_UNK23 0x00800000
176#define chipMinorFeatures2_UNK24 0x01000000
177#define chipMinorFeatures2_MIXED_STREAMS 0x02000000
178#define chipMinorFeatures2_2D_420_L2CACHE 0x04000000
179#define chipMinorFeatures2_UNK27 0x08000000
180#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000
181#define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000
182#define chipMinorFeatures2_UNK30 0x40000000
183#define chipMinorFeatures2_UNK31 0x80000000
184#define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001
185#define chipMinorFeatures3_UNK1 0x00000002
186#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004
187#define chipMinorFeatures3_UNK3 0x00000008
188#define chipMinorFeatures3_UNK4 0x00000010
189#define chipMinorFeatures3_UNK5 0x00000020
190#define chipMinorFeatures3_UNK6 0x00000040
191#define chipMinorFeatures3_UNK7 0x00000080
192#define chipMinorFeatures3_UNK8 0x00000100
193#define chipMinorFeatures3_UNK9 0x00000200
194#define chipMinorFeatures3_BUG_FIXES10 0x00000400
195#define chipMinorFeatures3_UNK11 0x00000800
196#define chipMinorFeatures3_BUG_FIXES11 0x00001000
197#define chipMinorFeatures3_UNK13 0x00002000
198#define chipMinorFeatures3_UNK14 0x00004000
199#define chipMinorFeatures3_UNK15 0x00008000
200#define chipMinorFeatures3_UNK16 0x00010000
201#define chipMinorFeatures3_UNK17 0x00020000
202#define chipMinorFeatures3_UNK18 0x00040000
203#define chipMinorFeatures3_UNK19 0x00080000
204#define chipMinorFeatures3_UNK20 0x00100000
205#define chipMinorFeatures3_UNK21 0x00200000
206#define chipMinorFeatures3_UNK22 0x00400000
207#define chipMinorFeatures3_UNK23 0x00800000
208#define chipMinorFeatures3_UNK24 0x01000000
209#define chipMinorFeatures3_UNK25 0x02000000
210#define chipMinorFeatures3_UNK26 0x04000000
211#define chipMinorFeatures3_UNK27 0x08000000
212#define chipMinorFeatures3_UNK28 0x10000000
213#define chipMinorFeatures3_UNK29 0x20000000
214#define chipMinorFeatures3_UNK30 0x40000000
215#define chipMinorFeatures3_UNK31 0x80000000
216#define chipMinorFeatures4_UNK0 0x00000001
217#define chipMinorFeatures4_UNK1 0x00000002
218#define chipMinorFeatures4_UNK2 0x00000004
219#define chipMinorFeatures4_UNK3 0x00000008
220#define chipMinorFeatures4_UNK4 0x00000010
221#define chipMinorFeatures4_UNK5 0x00000020
222#define chipMinorFeatures4_UNK6 0x00000040
223#define chipMinorFeatures4_UNK7 0x00000080
224#define chipMinorFeatures4_UNK8 0x00000100
225#define chipMinorFeatures4_UNK9 0x00000200
226#define chipMinorFeatures4_UNK10 0x00000400
227#define chipMinorFeatures4_UNK11 0x00000800
228#define chipMinorFeatures4_UNK12 0x00001000
229#define chipMinorFeatures4_UNK13 0x00002000
230#define chipMinorFeatures4_UNK14 0x00004000
231#define chipMinorFeatures4_UNK15 0x00008000
232#define chipMinorFeatures4_UNK16 0x00010000
233#define chipMinorFeatures4_UNK17 0x00020000
234#define chipMinorFeatures4_UNK18 0x00040000
235#define chipMinorFeatures4_UNK19 0x00080000
236#define chipMinorFeatures4_UNK20 0x00100000
237#define chipMinorFeatures4_UNK21 0x00200000
238#define chipMinorFeatures4_UNK22 0x00400000
239#define chipMinorFeatures4_UNK23 0x00800000
240#define chipMinorFeatures4_UNK24 0x01000000
241#define chipMinorFeatures4_UNK25 0x02000000
242#define chipMinorFeatures4_UNK26 0x04000000
243#define chipMinorFeatures4_UNK27 0x08000000
244#define chipMinorFeatures4_UNK28 0x10000000
245#define chipMinorFeatures4_UNK29 0x20000000
246#define chipMinorFeatures4_UNK30 0x40000000
247#define chipMinorFeatures4_UNK31 0x80000000
248
249#endif /* COMMON_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
new file mode 100644
index 000000000000..332c55ebba6d
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -0,0 +1,268 @@
1/*
2 * Copyright (C) 2014 Etnaviv Project
3 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "etnaviv_gpu.h"
19#include "etnaviv_gem.h"
20#include "etnaviv_mmu.h"
21
22#include "common.xml.h"
23#include "state.xml.h"
24#include "cmdstream.xml.h"
25
26/*
27 * Command Buffer helper:
28 */
29
30
31static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
32{
33 u32 *vaddr = (u32 *)buffer->vaddr;
34
35 BUG_ON(buffer->user_size >= buffer->size);
36
37 vaddr[buffer->user_size / 4] = data;
38 buffer->user_size += 4;
39}
40
41static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
42 u32 reg, u32 value)
43{
44 u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
45
46 buffer->user_size = ALIGN(buffer->user_size, 8);
47
48 /* write a register via cmd stream */
49 OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
50 VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
51 VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
52 OUT(buffer, value);
53}
54
55static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
56{
57 buffer->user_size = ALIGN(buffer->user_size, 8);
58
59 OUT(buffer, VIV_FE_END_HEADER_OP_END);
60}
61
62static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
63{
64 buffer->user_size = ALIGN(buffer->user_size, 8);
65
66 OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
67}
68
69static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
70 u16 prefetch, u32 address)
71{
72 buffer->user_size = ALIGN(buffer->user_size, 8);
73
74 OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
75 VIV_FE_LINK_HEADER_PREFETCH(prefetch));
76 OUT(buffer, address);
77}
78
79static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
80 u32 from, u32 to)
81{
82 buffer->user_size = ALIGN(buffer->user_size, 8);
83
84 OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
85 OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
86}
87
88static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
89{
90 u32 flush;
91 u32 stall;
92
93 /*
94 * This assumes that if we're switching to 2D, we're switching
95 * away from 3D, and vice versa. Hence, if we're switching to
96 * the 2D core, we need to flush the 3D depth and color caches,
97 * otherwise we need to flush the 2D pixel engine cache.
98 */
99 if (pipe == ETNA_PIPE_2D)
100 flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
101 else
102 flush = VIVS_GL_FLUSH_CACHE_PE2D;
103
104 stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
105 VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
106
107 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
108 CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
109
110 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
111
112 CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
113 VIVS_GL_PIPE_SELECT_PIPE(pipe));
114}
115
116static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
117{
118 return buf->paddr - gpu->memory_base;
119}
120
121static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
122 struct etnaviv_cmdbuf *buf, u32 off, u32 len)
123{
124 u32 size = buf->size;
125 u32 *ptr = buf->vaddr + off;
126
127 dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
128 ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
129
130 print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
131 ptr, len * 4, 0);
132}
133
134u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
135{
136 struct etnaviv_cmdbuf *buffer = gpu->buffer;
137
138 /* initialize buffer */
139 buffer->user_size = 0;
140
141 CMD_WAIT(buffer);
142 CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
143
144 return buffer->user_size / 8;
145}
146
147void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
148{
149 struct etnaviv_cmdbuf *buffer = gpu->buffer;
150
151 /* Replace the last WAIT with an END */
152 buffer->user_size -= 16;
153
154 CMD_END(buffer);
155 mb();
156}
157
158void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
159 struct etnaviv_cmdbuf *cmdbuf)
160{
161 struct etnaviv_cmdbuf *buffer = gpu->buffer;
162 u32 *lw = buffer->vaddr + buffer->user_size - 16;
163 u32 back, link_target, link_size, reserve_size, extra_size = 0;
164
165 if (drm_debug & DRM_UT_DRIVER)
166 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
167
168 /*
169 * If we need to flush the MMU prior to submitting this buffer, we
170 * will need to append a mmu flush load state, followed by a new
171 * link to this buffer - a total of four additional words.
172 */
173 if (gpu->mmu->need_flush || gpu->switch_context) {
174 /* link command */
175 extra_size += 2;
176 /* flush command */
177 if (gpu->mmu->need_flush)
178 extra_size += 2;
179 /* pipe switch commands */
180 if (gpu->switch_context)
181 extra_size += 8;
182 }
183
184 reserve_size = (6 + extra_size) * 4;
185
186 /*
187 * if we are going to completely overflow the buffer, we need to wrap.
188 */
189 if (buffer->user_size + reserve_size > buffer->size)
190 buffer->user_size = 0;
191
192 /* save offset back into main buffer */
193 back = buffer->user_size + reserve_size - 6 * 4;
194 link_target = gpu_va(gpu, buffer) + buffer->user_size;
195 link_size = 6;
196
197 /* Skip over any extra instructions */
198 link_target += extra_size * sizeof(u32);
199
200 if (drm_debug & DRM_UT_DRIVER)
201 pr_info("stream link to 0x%08x @ 0x%08x %p\n",
202 link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
203
204 /* jump back from cmd to main buffer */
205 CMD_LINK(cmdbuf, link_size, link_target);
206
207 link_target = gpu_va(gpu, cmdbuf);
208 link_size = cmdbuf->size / 8;
209
210
211
212 if (drm_debug & DRM_UT_DRIVER) {
213 print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
214 cmdbuf->vaddr, cmdbuf->size, 0);
215
216 pr_info("link op: %p\n", lw);
217 pr_info("link addr: %p\n", lw + 1);
218 pr_info("addr: 0x%08x\n", link_target);
219 pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
220 pr_info("event: %d\n", event);
221 }
222
223 if (gpu->mmu->need_flush || gpu->switch_context) {
224 u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
225
226 if (gpu->mmu->need_flush) {
227 /* Add the MMU flush */
228 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
229 VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
230 VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
231 VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
232 VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
233 VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
234
235 gpu->mmu->need_flush = false;
236 }
237
238 if (gpu->switch_context) {
239 etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
240 gpu->switch_context = false;
241 }
242
243 /* And the link to the first buffer */
244 CMD_LINK(buffer, link_size, link_target);
245
246 /* Update the link target to point to above instructions */
247 link_target = new_target;
248 link_size = extra_size;
249 }
250
251 /* trigger event */
252 CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
253 VIVS_GL_EVENT_FROM_PE);
254
255 /* append WAIT/LINK to main buffer */
256 CMD_WAIT(buffer);
257 CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
258
259 /* Change WAIT into a LINK command; write the address first. */
260 *(lw + 1) = link_target;
261 mb();
262 *(lw) = VIV_FE_LINK_HEADER_OP_LINK |
263 VIV_FE_LINK_HEADER_PREFETCH(link_size);
264 mb();
265
266 if (drm_debug & DRM_UT_DRIVER)
267 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
268}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
new file mode 100644
index 000000000000..dcfd565c88d1
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/kernel.h>
18
19#include "etnaviv_gem.h"
20#include "etnaviv_gpu.h"
21
22#include "cmdstream.xml.h"
23
24#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)
25
/*
 * Per-submit validation context: the GPU being validated against, the
 * user-supplied relocation list (consumed in submit_offset order while
 * walking the stream), and the start of the stream for computing byte
 * offsets in diagnostics.
 */
struct etna_validation_state {
	struct etnaviv_gpu *gpu;
	const struct drm_etnaviv_gem_submit_reloc *relocs;
	unsigned int num_relocs;	/* relocation entries not yet consumed */
	u32 *start;			/* first word of the command stream */
};
32
/*
 * Table of GPU state addresses that may carry GPU virtual addresses and
 * therefore must be backed by a relocation entry in the submit.  Offsets
 * are stored in units of 32-bit state words (register address >> 2).
 * Only used from the __init path that seeds the runtime bitmap below.
 */
static const struct {
	u16 offset;	/* state address / 4 */
	u16 size;	/* number of consecutive sensitive state words */
} etnaviv_sensitive_states[] __initconst = {
#define ST(start, num) { (start) >> 2, (num) }
	/* 2D */
	ST(0x1200, 1),
	ST(0x1228, 1),
	ST(0x1238, 1),
	ST(0x1284, 1),
	ST(0x128c, 1),
	ST(0x1304, 1),
	ST(0x1310, 1),
	ST(0x1318, 1),
	ST(0x12800, 4),
	ST(0x128a0, 4),
	ST(0x128c0, 4),
	ST(0x12970, 4),
	ST(0x12a00, 8),
	ST(0x12b40, 8),
	ST(0x12b80, 8),
	ST(0x12ce0, 8),
	/* 3D */
	ST(0x0644, 1),
	ST(0x064c, 1),
	ST(0x0680, 8),
	ST(0x1410, 1),
	ST(0x1430, 1),
	ST(0x1458, 1),
	ST(0x1460, 8),
	ST(0x1480, 8),
	ST(0x1500, 8),
	ST(0x1520, 8),
	ST(0x1608, 1),
	ST(0x1610, 1),
	ST(0x1658, 1),
	ST(0x165c, 1),
	ST(0x1664, 1),
	ST(0x1668, 1),
	ST(0x16a4, 1),
	ST(0x16c0, 8),
	ST(0x16e0, 8),
	ST(0x1740, 8),
	ST(0x2400, 14 * 16),
	ST(0x10800, 32 * 16),
#undef ST
};
80
/* One bit per 32-bit state word in the FE LOAD_STATE offset space. */
#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);

/*
 * Seed the runtime sensitive-state bitmap from etnaviv_sensitive_states[].
 * Runs once at module init; the source table is __initconst and is
 * discarded afterwards.
 */
void __init etnaviv_validate_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
		bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
			   etnaviv_sensitive_states[i].size);
}
92
/*
 * Consume (and warn once about) relocation entries whose submit_offset
 * lies below @buf_offset, i.e. relocations supplied for state words that
 * are not in the sensitive-state table.  They are harmless but indicate
 * a userspace bug; skipping them keeps the relocation list aligned with
 * the state word currently being validated.
 */
static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
	unsigned int buf_offset, unsigned int state_addr)
{
	if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
		dev_warn_once(state->gpu->dev,
			      "%s: relocation for non-sensitive state 0x%x at offset %u\n",
			      __func__, state_addr,
			      state->relocs->submit_offset);
		while (state->num_relocs &&
		       state->relocs->submit_offset < buf_offset) {
			state->relocs++;
			state->num_relocs--;
		}
	}
}
108
/*
 * Validate one LOAD_STATE command: every state word it writes that is
 * marked in the sensitive-state bitmap must be covered by a relocation
 * entry at the matching buffer byte offset (relocations are consumed in
 * submit_offset order).  @ptr points at the first state value (the word
 * after the header), @state_offset is the base state address in words,
 * @num the number of state words written.  Returns false when a
 * sensitive state is written without a relocation.
 */
static bool etnaviv_validate_load_state(struct etna_validation_state *state,
	u32 *ptr, unsigned int state_offset, unsigned int num)
{
	/* Clamp the scan to the end of the tracked state space. */
	unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
	unsigned int st_offset = state_offset, buf_offset;

	for_each_set_bit_from(st_offset, etnaviv_states, size) {
		/* Byte offset of this state value in the command stream. */
		buf_offset = (ptr - state->start +
			      st_offset - state_offset) * 4;

		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
		if (state->num_relocs &&
		    state->relocs->submit_offset == buf_offset) {
			/* Sensitive state covered by a relocation - ok. */
			state->relocs++;
			state->num_relocs--;
			continue;
		}

		dev_warn_ratelimited(state->gpu->dev,
				     "%s: load state touches restricted state 0x%x at offset %u\n",
				     __func__, st_offset * 4, buf_offset);
		return false;
	}

	/* Report relocations left over past the end of this LOAD_STATE. */
	if (state->num_relocs) {
		buf_offset = (ptr - state->start + num) * 4;
		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
					      state->relocs->submit_offset -
					      buf_offset);
	}

	return true;
}
142
143static uint8_t cmd_length[32] = {
144 [FE_OPCODE_DRAW_PRIMITIVES] = 4,
145 [FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
146 [FE_OPCODE_NOP] = 2,
147 [FE_OPCODE_STALL] = 2,
148};
149
/*
 * Validate a user command stream before submission: only whitelisted FE
 * opcodes are accepted, LOAD_STATE may only touch sensitive states when
 * backed by a relocation, and commands must not run past the end of the
 * buffer.  @size is in 32-bit words.  Returns true when the stream is
 * safe to execute.
 */
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
	unsigned int size,
	struct drm_etnaviv_gem_submit_reloc *relocs,
	unsigned int reloc_size)
{
	struct etna_validation_state state;
	u32 *buf = stream;
	u32 *end = buf + size;

	state.gpu = gpu;
	state.relocs = relocs;
	state.num_relocs = reloc_size;
	state.start = stream;

	while (buf < end) {
		u32 cmd = *buf;
		unsigned int len, n, off;
		unsigned int op = cmd >> 27;	/* 5-bit FE opcode */

		switch (op) {
		case FE_OPCODE_LOAD_STATE:
			n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
			/* header + n state words, padded to 64-bit align */
			len = ALIGN(1 + n, 2);
			if (buf + len > end)
				break;	/* overrun is reported after the loop */

			off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
			if (!etnaviv_validate_load_state(&state, buf + 1,
							 off, n))
				return false;
			break;

		case FE_OPCODE_DRAW_2D:
			n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
			if (n == 0)
				n = 256;	/* count 0 encodes the maximum */
			len = 2 + n * 2;
			break;

		default:
			len = cmd_length[op];
			if (len == 0) {
				dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
					__func__, op, buf - state.start);
				return false;
			}
			break;
		}

		buf += len;
	}

	/* A truncated final command advanced buf past end - reject. */
	if (buf > end) {
		dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
			__func__, buf - state.start, size);
		return false;
	}

	return true;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
new file mode 100644
index 000000000000..5c89ebb52fd2
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -0,0 +1,707 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
#include <linux/component.h>
#include <linux/of_platform.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
25
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
/* When set, every MMIO access via etnaviv_readl/etnaviv_writel is logged. */
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0	/* compiled out: logging branches fold away */
#endif
33
34void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
35 const char *dbgname)
36{
37 struct resource *res;
38 void __iomem *ptr;
39
40 if (name)
41 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
42 else
43 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
44
45 ptr = devm_ioremap_resource(&pdev->dev, res);
46 if (IS_ERR(ptr)) {
47 dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
48 PTR_ERR(ptr));
49 return ptr;
50 }
51
52 if (reglog)
53 dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
54 dbgname, ptr, (size_t)resource_size(res));
55
56 return ptr;
57}
58
59void etnaviv_writel(u32 data, void __iomem *addr)
60{
61 if (reglog)
62 printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
63
64 writel(data, addr);
65}
66
67u32 etnaviv_readl(const void __iomem *addr)
68{
69 u32 val = readl(addr);
70
71 if (reglog)
72 printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
73
74 return val;
75}
76
77/*
78 * DRM operations:
79 */
80
81
82static void load_gpu(struct drm_device *dev)
83{
84 struct etnaviv_drm_private *priv = dev->dev_private;
85 unsigned int i;
86
87 for (i = 0; i < ETNA_MAX_PIPES; i++) {
88 struct etnaviv_gpu *g = priv->gpu[i];
89
90 if (g) {
91 int ret;
92
93 ret = etnaviv_gpu_init(g);
94 if (ret) {
95 dev_err(g->dev, "hw init failed: %d\n", ret);
96 priv->gpu[i] = NULL;
97 }
98 }
99 }
100}
101
102static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
103{
104 struct etnaviv_file_private *ctx;
105
106 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
107 if (!ctx)
108 return -ENOMEM;
109
110 file->driver_priv = ctx;
111
112 return 0;
113}
114
115static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
116{
117 struct etnaviv_drm_private *priv = dev->dev_private;
118 struct etnaviv_file_private *ctx = file->driver_priv;
119 unsigned int i;
120
121 for (i = 0; i < ETNA_MAX_PIPES; i++) {
122 struct etnaviv_gpu *gpu = priv->gpu[i];
123
124 if (gpu) {
125 mutex_lock(&gpu->lock);
126 if (gpu->lastctx == ctx)
127 gpu->lastctx = NULL;
128 mutex_unlock(&gpu->lock);
129 }
130 }
131
132 kfree(ctx);
133}
134
135/*
136 * DRM debugfs:
137 */
138
139#ifdef CONFIG_DEBUG_FS
/* debugfs "gem": describe all GEM objects tracked by this device. */
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}
148
/* debugfs "mm": dump the mmap-offset address space allocator state. */
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	int ret;

	/* vm_lock guards the vma offset manager's drm_mm. */
	read_lock(&dev->vma_offset_manager->vm_lock);
	ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return ret;
}
159
/* debugfs "mmu": dump a GPU's MMU address space allocator state. */
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/* mmu->lock serializes against map/unmap on this GPU. */
	mutex_lock(&gpu->mmu->lock);
	drm_mm_dump_table(m, &gpu->mmu->mm);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}
170
/*
 * Hex-dump the GPU ring buffer to the seq_file, four 32-bit words per
 * line, each line prefixed with the CPU virtual address of its first
 * word.  Called under gpu->lock by etnaviv_ring_show().
 */
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)buf->paddr, size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		/* Break the line and emit a new address every 4 words. */
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}
190
/* debugfs "ring": dump a GPU's ring buffer under its lock. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	/* Keep the submit path out while the buffer is being read. */
	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
201
/*
 * debugfs trampoline for device-wide entries: the actual show callback
 * is stashed in the drm_info_list entry's ->data.
 */
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
		node->info_ent->data;

	return show(dev, m);
}
211
212static int show_each_gpu(struct seq_file *m, void *arg)
213{
214 struct drm_info_node *node = (struct drm_info_node *) m->private;
215 struct drm_device *dev = node->minor->dev;
216 struct etnaviv_drm_private *priv = dev->dev_private;
217 struct etnaviv_gpu *gpu;
218 int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
219 node->info_ent->data;
220 unsigned int i;
221 int ret = 0;
222
223 for (i = 0; i < ETNA_MAX_PIPES; i++) {
224 gpu = priv->gpu[i];
225 if (!gpu)
226 continue;
227
228 ret = show(gpu, m);
229 if (ret < 0)
230 break;
231 }
232
233 return ret;
234}
235
/* debugfs entries; ->data selects the callback run by the trampolines. */
static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{ "mm", show_unlocked, 0, etnaviv_mm_show },
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};
243
244static int etnaviv_debugfs_init(struct drm_minor *minor)
245{
246 struct drm_device *dev = minor->dev;
247 int ret;
248
249 ret = drm_debugfs_create_files(etnaviv_debugfs_list,
250 ARRAY_SIZE(etnaviv_debugfs_list),
251 minor->debugfs_root, minor);
252
253 if (ret) {
254 dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
255 return ret;
256 }
257
258 return ret;
259}
260
/* Remove the files installed by etnaviv_debugfs_init(). */
static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(etnaviv_debugfs_list,
			ARRAY_SIZE(etnaviv_debugfs_list), minor);
}
266#endif
267
268/*
269 * DRM ioctls:
270 */
271
272static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
273 struct drm_file *file)
274{
275 struct etnaviv_drm_private *priv = dev->dev_private;
276 struct drm_etnaviv_param *args = data;
277 struct etnaviv_gpu *gpu;
278
279 if (args->pipe >= ETNA_MAX_PIPES)
280 return -EINVAL;
281
282 gpu = priv->gpu[args->pipe];
283 if (!gpu)
284 return -ENXIO;
285
286 return etnaviv_gpu_get_param(gpu, args->param, &args->value);
287}
288
289static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
290 struct drm_file *file)
291{
292 struct drm_etnaviv_gem_new *args = data;
293
294 if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
295 ETNA_BO_FORCE_MMU))
296 return -EINVAL;
297
298 return etnaviv_gem_new_handle(dev, file, args->size,
299 args->flags, &args->handle);
300}
301
/*
 * Convert the UAPI timeout representation into a kernel struct timespec.
 * Expands to a compound literal, so &TS(...) yields a pointer valid for
 * the enclosing block; note that (t) is evaluated twice.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
306
307static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
308 struct drm_file *file)
309{
310 struct drm_etnaviv_gem_cpu_prep *args = data;
311 struct drm_gem_object *obj;
312 int ret;
313
314 if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
315 return -EINVAL;
316
317 obj = drm_gem_object_lookup(dev, file, args->handle);
318 if (!obj)
319 return -ENOENT;
320
321 ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
322
323 drm_gem_object_unreference_unlocked(obj);
324
325 return ret;
326}
327
328static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
329 struct drm_file *file)
330{
331 struct drm_etnaviv_gem_cpu_fini *args = data;
332 struct drm_gem_object *obj;
333 int ret;
334
335 if (args->flags)
336 return -EINVAL;
337
338 obj = drm_gem_object_lookup(dev, file, args->handle);
339 if (!obj)
340 return -ENOENT;
341
342 ret = etnaviv_gem_cpu_fini(obj);
343
344 drm_gem_object_unreference_unlocked(obj);
345
346 return ret;
347}
348
349static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
350 struct drm_file *file)
351{
352 struct drm_etnaviv_gem_info *args = data;
353 struct drm_gem_object *obj;
354 int ret;
355
356 if (args->pad)
357 return -EINVAL;
358
359 obj = drm_gem_object_lookup(dev, file, args->handle);
360 if (!obj)
361 return -ENOENT;
362
363 ret = etnaviv_gem_mmap_offset(obj, &args->offset);
364 drm_gem_object_unreference_unlocked(obj);
365
366 return ret;
367}
368
369static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
370 struct drm_file *file)
371{
372 struct drm_etnaviv_wait_fence *args = data;
373 struct etnaviv_drm_private *priv = dev->dev_private;
374 struct timespec *timeout = &TS(args->timeout);
375 struct etnaviv_gpu *gpu;
376
377 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
378 return -EINVAL;
379
380 if (args->pipe >= ETNA_MAX_PIPES)
381 return -EINVAL;
382
383 gpu = priv->gpu[args->pipe];
384 if (!gpu)
385 return -ENXIO;
386
387 if (args->flags & ETNA_WAIT_NONBLOCK)
388 timeout = NULL;
389
390 return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
391 timeout);
392}
393
/*
 * DRM_IOCTL_ETNAVIV_GEM_USERPTR: wrap an existing userspace allocation
 * in a GEM object.  Pointer and size must be page aligned and must fit
 * in uintptr_t / u32 respectively, and the range must be accessible
 * with the requested access mode.
 */
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	/* At least one of READ/WRITE must be set, and nothing else. */
	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	/*
	 * NOTE(review): the trailing "user_ptr & ~PAGE_MASK" test looks
	 * redundant with offset_in_page() above - confirm before removing.
	 */
	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}
423
424static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
425 struct drm_file *file)
426{
427 struct etnaviv_drm_private *priv = dev->dev_private;
428 struct drm_etnaviv_gem_wait *args = data;
429 struct timespec *timeout = &TS(args->timeout);
430 struct drm_gem_object *obj;
431 struct etnaviv_gpu *gpu;
432 int ret;
433
434 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
435 return -EINVAL;
436
437 if (args->pipe >= ETNA_MAX_PIPES)
438 return -EINVAL;
439
440 gpu = priv->gpu[args->pipe];
441 if (!gpu)
442 return -ENXIO;
443
444 obj = drm_gem_object_lookup(dev, file, args->handle);
445 if (!obj)
446 return -ENOENT;
447
448 if (args->flags & ETNA_WAIT_NONBLOCK)
449 timeout = NULL;
450
451 ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
452
453 drm_gem_object_unreference_unlocked(obj);
454
455 return ret;
456}
457
/* ioctl table: all etnaviv ioctls require auth and allow render nodes. */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
};
471
/* VM ops for mmap'ed GEM objects: etnaviv fault handler, generic open/close. */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
477
/* File ops: generic DRM handlers plus the etnaviv GEM mmap path. */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};
491
/*
 * DRM driver description: a render-only (no modeset) GEM driver with
 * PRIME import/export support.
 */
static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER,
	.open               = etnaviv_open,
	.preclose           = etnaviv_preclose,
	.set_busid          = drm_platform_set_busid,
	.gem_free_object    = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
	.debugfs_cleanup    = etnaviv_debugfs_cleanup,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 0,
};
525
526/*
527 * Platform driver:
528 */
/*
 * Component master bind: create the DRM device and its private data,
 * set up the ordered workqueue, bind all GPU core components, bring up
 * the GPUs and register the device.  Error paths unwind in reverse
 * order of setup.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (!drm)
		return -ENOMEM;

	drm->platformdev = to_platform_device(dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	/* Ordered queue: driver work items must not run concurrently. */
	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	/* drvdata must be set before components bind and look it up. */
	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}
585
/*
 * Component master unbind: reverse of etnaviv_bind().  Unregister the
 * DRM device first so no new ioctls arrive, drain and destroy the
 * workqueue, unbind the GPU components, then drop the device.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_put_dev(drm);
}
603
/* Component framework callbacks run once all GPU cores have probed. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
608
609static int compare_of(struct device *dev, void *data)
610{
611 struct device_node *np = data;
612
613 return dev->of_node == np;
614}
615
/* Component match: true when the device name equals the wanted string. */
static int compare_str(struct device *dev, void *data)
{
	const char *name = data;

	return strcmp(dev_name(dev), name) == 0;
}
620
/*
 * Platform probe: collect the GPU core devices referenced either by the
 * "cores" DT phandle list or by a NULL-terminated platform-data name
 * array, and register them with the component framework.  etnaviv_bind()
 * runs once all listed cores have probed.
 */
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct component_match *match = NULL;

	/* NOTE(review): dma_set_coherent_mask() return value is ignored. */
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (node) {
		struct device_node *core_node;
		int i;

		for (i = 0; ; i++) {
			core_node = of_parse_phandle(node, "cores", i);
			if (!core_node)
				break;

			component_match_add(&pdev->dev, &match, compare_of,
					    core_node);
			of_node_put(core_node);
		}
	} else if (dev->platform_data) {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
652
653static int etnaviv_pdev_remove(struct platform_device *pdev)
654{
655 component_master_del(&pdev->dev, &etnaviv_master_ops);
656
657 return 0;
658}
659
/* DT compatibles that instantiate the etnaviv master device. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
666
/* NOTE(review): .owner is also set by platform_driver_register(). */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.owner  = THIS_MODULE,
		.name   = "etnaviv",
		.of_match_table = dt_match,
	},
};
676
/*
 * Module init: build the sensitive-state bitmap for the command parser,
 * then register the GPU core driver followed by the master platform
 * driver, unwinding the first registration if the second fails.
 */
static int __init etnaviv_init(void)
{
	int ret;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		platform_driver_unregister(&etnaviv_gpu_driver);

	return ret;
}
693module_init(etnaviv_init);
694
695static void __exit etnaviv_exit(void)
696{
697 platform_driver_unregister(&etnaviv_gpu_driver);
698 platform_driver_unregister(&etnaviv_platform_driver);
699}
700module_exit(etnaviv_exit);
701
702MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
703MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
704MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
705MODULE_DESCRIPTION("etnaviv DRM Driver");
706MODULE_LICENSE("GPL v2");
707MODULE_ALIAS("platform:etnaviv");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
new file mode 100644
index 000000000000..d6bd438bd5be
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ETNAVIV_DRV_H__
18#define __ETNAVIV_DRV_H__
19
20#include <linux/kernel.h>
21#include <linux/clk.h>
22#include <linux/cpufreq.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/pm.h>
26#include <linux/pm_runtime.h>
27#include <linux/slab.h>
28#include <linux/list.h>
29#include <linux/iommu.h>
30#include <linux/types.h>
31#include <linux/sizes.h>
32
33#include <drm/drmP.h>
34#include <drm/drm_crtc_helper.h>
35#include <drm/drm_fb_helper.h>
36#include <drm/drm_gem.h>
37#include <drm/etnaviv_drm.h>
38
39struct etnaviv_cmdbuf;
40struct etnaviv_gpu;
41struct etnaviv_mmu;
42struct etnaviv_gem_object;
43struct etnaviv_gem_submit;
44
struct etnaviv_file_private {
	/* currently we don't do anything useful with this.. but when
	 * per-context address spaces are supported we'd keep track of
	 * the context's page-tables here.
	 */
	int dummy;	/* placeholder so the struct is non-empty */
};
52
/* Per-DRM-device driver state, hung off drm_device::dev_private. */
struct etnaviv_drm_private {
	int num_gpus;	/* number of bound GPU cores */
	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];

	/* list of GEM objects; gem_lock protects gem_list */
	struct mutex gem_lock;
	struct list_head gem_list;

	struct workqueue_struct *wq;	/* ordered driver workqueue */
};
63
/* Queue work on the driver's ordered workqueue. */
static inline void etnaviv_queue_work(struct drm_device *dev,
	struct work_struct *w)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	queue_work(priv->wq, w);
}
71
72int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
73 struct drm_file *file);
74
75int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
76int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
77int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
78int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
79 struct drm_gem_object *obj, u32 *iova);
80void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
81struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
82void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
83void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
84struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
85 struct dma_buf_attachment *attach, struct sg_table *sg);
86int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
87void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
88void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
89int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
90 struct timespec *timeout);
91int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
92void etnaviv_gem_free_object(struct drm_gem_object *obj);
93int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
94 u32 size, u32 flags, u32 *handle);
95struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
96 u32 size, u32 flags);
97struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
98 u32 size, u32 flags);
99int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
100 uintptr_t ptr, u32 size, u32 flags, u32 *handle);
101u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
102void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
103void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
104 struct etnaviv_cmdbuf *cmdbuf);
105void etnaviv_validate_init(void);
106bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
107 u32 *stream, unsigned int size,
108 struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);
109
110#ifdef CONFIG_DEBUG_FS
111void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
112 struct seq_file *m);
113#endif
114
115void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
116 const char *dbgname);
117void etnaviv_writel(u32 data, void __iomem *addr);
118u32 etnaviv_readl(const void __iomem *addr);
119
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
/*
 * Compiled-out verbose debug: arguments stay type-checked but the call
 * is eliminated.  Wrapped in do/while(0) so the macro is one statement
 * and cannot capture a following "else" (the bare "if (0)" form had a
 * dangling-else hazard).
 */
#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
122
/*
 * Return the storage size of a structure with a variable length array:
 * base + nelem * elem_size, where base is the size of the fixed part.
 * Returns 0 if the result would overflow size_t.
 */
static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
	size_t max_elems;

	if (elem_size == 0)
		return base;

	max_elems = (SIZE_MAX - base) / elem_size;
	if (nelem > max_elems)
		return 0;

	return base + nelem * elem_size;
}
134
/*
 * Wrap-safe fence seqno comparisons: a comes "after" b when the signed
 * 32-bit difference (a - b) is positive, which stays correct across u32
 * wrap-around as long as the two values are less than 2^31 apart.
 */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

static inline bool fence_after_eq(u32 a, u32 b)
{
	return (s32)(a - b) >= 0;
}
145
/*
 * Convert a timespec timeout into the number of jiffies remaining from
 * now, clamped to zero when already expired.
 *
 * NOTE(review): timespec_to_jiffies() converts a duration, yet the
 * result is compared against the current jiffies counter as if it were
 * an absolute deadline - confirm the intended timeout base with callers.
 */
static inline unsigned long etnaviv_timeout_to_jiffies(
	const struct timespec *timeout)
{
	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
	unsigned long start_jiffies = jiffies;
	unsigned long remaining_jiffies;

	if (time_after(start_jiffies, timeout_jiffies))
		remaining_jiffies = 0;
	else
		remaining_jiffies = timeout_jiffies - start_jiffies;

	return remaining_jiffies;
}
160
161#endif /* __ETNAVIV_DRV_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
new file mode 100644
index 000000000000..bf8fa859e8be
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/devcoredump.h>
18#include "etnaviv_dump.h"
19#include "etnaviv_gem.h"
20#include "etnaviv_gpu.h"
21#include "etnaviv_mmu.h"
22#include "state.xml.h"
23#include "state_hi.xml.h"
24
/*
 * Cursor over the devcoredump file while it is being filled:
 * @start: base of the vmalloc'ed dump buffer
 * @hdr:   next object header slot to fill (headers are a leading array)
 * @data:  next free payload position, after the header array
 */
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};
30
/* GPU registers captured into the ETDUMP_BUF_REG section of a dump. */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
70
71static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
72 u32 type, void *data_end)
73{
74 struct etnaviv_dump_object_header *hdr = iter->hdr;
75
76 hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
77 hdr->type = cpu_to_le32(type);
78 hdr->file_offset = cpu_to_le32(iter->data - iter->start);
79 hdr->file_size = cpu_to_le32(data_end - iter->data);
80
81 iter->hdr++;
82 iter->data += hdr->file_size;
83}
84
/*
 * Snapshot the registers listed in etnaviv_dump_registers[] into the
 * dump file and emit the matching ETDUMP_BUF_REG object header.
 */
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		/*
		 * NOTE(review): reg/value are __le32 in the file format
		 * but are stored native-endian here; confirm whether
		 * big-endian hosts need cpu_to_le32().
		 */
		reg->reg = etnaviv_dump_registers[i];
		reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}
98
/*
 * Dump the MMU page tables (mmu_size bytes, as sized earlier by
 * etnaviv_iommu_dump_size()) and emit its ETDUMP_BUF_MMU header.
 */
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu, size_t mmu_size)
{
	etnaviv_iommu_dump(gpu->mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}
106
/*
 * Copy a memory buffer (ring or command buffer) into the dump file,
 * recording the GPU address it was mapped at in the object header.
 */
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	/* must be set before etnaviv_core_dump_header() advances iter->hdr */
	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}
116
/*
 * Capture a devcoredump of the GPU state (typically after a hang):
 * registers, MMU page tables, the kernel ring buffer, all active
 * command buffers, the active buffer objects and a "bomap" table of
 * their backing page addresses.  Works in two passes: first size the
 * file, then fill it.  Ownership of the vmalloc'ed buffer passes to
 * the devcoredump core on success.
 */
void etnaviv_core_dump(struct etnaviv_gpu *gpu)
{
	struct core_dump_iterator iter;
	struct etnaviv_vram_mapping *vram;
	struct etnaviv_gem_object *obj;
	struct etnaviv_cmdbuf *cmd;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);

	/* We always dump registers, mmu, ring and end marker */
	n_obj = 4;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
			mmu_size + gpu->buffer->size;

	/* Add in the active command buffers */
	list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
		file_size += cmd->size;
		n_obj++;
	}

	/* Add in the active buffer objects */
	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		if (!vram->use)
			continue;

		obj = vram->object;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = vmalloc(file_size);
	if (!iter.start) {
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	/* zero the header array so unwritten fields read as 0 */
	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
			      gpu->buffer->size, gpu->buffer->paddr);

	list_for_each_entry(cmd, &gpu->active_cmd_list, node)
		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
				      cmd->size, cmd->paddr);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		struct page **pages;
		void *vaddr;

		if (vram->use == 0)
			continue;

		obj = vram->object;

		pages = etnaviv_gem_get_pages(obj);
		if (pages) {
			int j;

			/*
			 * data[0] is the index of this BO's first entry
			 * in the bomap.
			 * NOTE(review): stored native-endian into a
			 * __le32 field - confirm for big-endian hosts.
			 */
			iter.hdr->data[0] = bomap - bomap_start;

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vaddr(&obj->base);
		if (vaddr && !IS_ERR(vaddr))
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	/* zero-sized end marker terminates the object list */
	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	/* devcoredump takes ownership of iter.start and frees it */
	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
new file mode 100644
index 000000000000..97f2f8db9133
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Etnaviv devcoredump file definitions
17 */
18#ifndef ETNAVIV_DUMP_H
19#define ETNAVIV_DUMP_H
20
21#include <linux/types.h>
22
/*
 * File format markers: every object header begins with ETDUMP_MAGIC
 * ("ETNA" read as a little-endian u32), followed by one of the
 * ETDUMP_BUF_* section types below.  The object list is terminated by
 * an ETDUMP_BUF_END header.
 */
enum {
	ETDUMP_MAGIC = 0x414e5445,
	ETDUMP_BUF_REG = 0,
	ETDUMP_BUF_MMU,
	ETDUMP_BUF_RING,
	ETDUMP_BUF_CMD,
	ETDUMP_BUF_BOMAP,
	ETDUMP_BUF_BO,
	ETDUMP_BUF_END,
};
33
/*
 * One per dump object; all headers are stored as a contiguous array at
 * the start of the file.  All fields are little-endian.
 */
struct etnaviv_dump_object_header {
	__le32 magic;		/* ETDUMP_MAGIC */
	__le32 type;		/* ETDUMP_BUF_* */
	__le32 file_offset;	/* payload offset from start of file */
	__le32 file_size;	/* payload size in bytes */
	__le64 iova;		/* GPU address, where applicable */
	__le32 data[2];		/* type-specific (BO: bomap start index) */
};
42
/* Registers object, an array of these (one register/value pair each) */
struct etnaviv_dump_registers {
	__le32 reg;
	__le32 value;
};
48
49#ifdef __KERNEL__
50struct etnaviv_gpu;
51void etnaviv_core_dump(struct etnaviv_gpu *gpu);
52#endif
53
54#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
new file mode 100644
index 000000000000..9f77c3b94cc6
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -0,0 +1,899 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/spinlock.h>
18#include <linux/shmem_fs.h>
19
20#include "etnaviv_drv.h"
21#include "etnaviv_gem.h"
22#include "etnaviv_gpu.h"
23#include "etnaviv_mmu.h"
24
/*
 * Map the object's scatterlist for device DMA.  For buffers with a
 * caching mode set, this also cleans the CPU cache so the
 * non-coherent GPU observes current data.
 */
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 *
	 * NOTE(review): the dma_map_sg() return value is ignored, so a
	 * mapping failure goes unnoticed here - confirm intentional.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}
37
/* Undo etnaviv_gem_scatter_map() before the pages are released. */
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}
61
62/* called with etnaviv_obj->lock held */
63static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
64{
65 struct drm_device *dev = etnaviv_obj->base.dev;
66 struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
67
68 if (IS_ERR(p)) {
69 dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
70 return PTR_ERR(p);
71 }
72
73 etnaviv_obj->pages = p;
74
75 return 0;
76}
77
78static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
79{
80 if (etnaviv_obj->sgt) {
81 etnaviv_gem_scatterlist_unmap(etnaviv_obj);
82 sg_free_table(etnaviv_obj->sgt);
83 kfree(etnaviv_obj->sgt);
84 etnaviv_obj->sgt = NULL;
85 }
86 if (etnaviv_obj->pages) {
87 drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
88 true, false);
89
90 etnaviv_obj->pages = NULL;
91 }
92}
93
/*
 * Get (and pin) the backing pages of an object, building the sg table
 * and DMA-mapping it on first use.  Must be called with
 * etnaviv_obj->lock held.  Returns the page array or an ERR_PTR.
 */
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		/* backend-specific: shmem or userptr */
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
125
/*
 * Counterpart of etnaviv_gem_get_pages().  Currently a no-op: pages
 * stay pinned until object release.  Must be called with
 * etnaviv_obj->lock held.
 */
void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
131
/*
 * Apply the object's caching mode (WC / uncached / cached) to a VMA
 * already set up by drm_gem_mmap().
 */
static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	pgprot_t vm_page_prot;

	/* pages are inserted individually via vm_insert_page() */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
163
164int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
165{
166 struct etnaviv_gem_object *obj;
167 int ret;
168
169 ret = drm_gem_mmap(filp, vma);
170 if (ret) {
171 DBG("mmap failed: %d", ret);
172 return ret;
173 }
174
175 obj = to_etnaviv_bo(vma->vm_private_data);
176 return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
177}
178
/*
 * Page fault handler for mmap'ed objects: pin the backing pages on
 * first touch and insert the faulting page into the user VMA.
 */
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	/* translate kernel error codes into VM fault status */
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
234
235int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
236{
237 int ret;
238
239 /* Make it mmapable */
240 ret = drm_gem_create_mmap_offset(obj);
241 if (ret)
242 dev_err(obj->dev->dev, "could not allocate mmap offset\n");
243 else
244 *offset = drm_vma_node_offset_addr(&obj->vma_node);
245
246 return ret;
247}
248
249static struct etnaviv_vram_mapping *
250etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
251 struct etnaviv_iommu *mmu)
252{
253 struct etnaviv_vram_mapping *mapping;
254
255 list_for_each_entry(mapping, &obj->vram_list, obj_node) {
256 if (mapping->mmu == mmu)
257 return mapping;
258 }
259
260 return NULL;
261}
262
/*
 * Look up or create the GPU (MMU) mapping of an object and return its
 * GPU address in *iova.  On success this takes a use count on the
 * mapping and a reference on the object; balanced by
 * etnaviv_gem_put_iova().
 */
int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
	struct drm_gem_object *obj, u32 *iova)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	/* pin the backing pages before mapping them into the MMU */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		/* re-linked to the tail below on success */
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (!ret) {
		/* Take a reference on the object */
		drm_gem_object_reference(obj);
		*iova = mapping->iova;
	}

	return ret;
}
340
/*
 * Drop the mapping use count and object reference taken by
 * etnaviv_gem_get_iova().  The mapping itself is kept cached for
 * re-use and is only torn down on object free or MMU eviction.
 *
 * NOTE(review): assumes a mapping for gpu->mmu exists; a NULL return
 * from etnaviv_gem_get_vram_mapping() would oops on mapping->use.
 * Confirm callers always pair this with a successful get_iova.
 */
void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);

	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(obj);
}
355
356void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
357{
358 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
359
360 mutex_lock(&etnaviv_obj->lock);
361 if (!etnaviv_obj->vaddr) {
362 struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
363
364 if (IS_ERR(pages))
365 return ERR_CAST(pages);
366
367 etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
368 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
369 }
370 mutex_unlock(&etnaviv_obj->lock);
371
372 return etnaviv_obj->vaddr;
373}
374
375static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
376{
377 if (op & ETNA_PREP_READ)
378 return DMA_FROM_DEVICE;
379 else if (op & ETNA_PREP_WRITE)
380 return DMA_TO_DEVICE;
381 else
382 return DMA_BIDIRECTIONAL;
383}
384
385int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
386 struct timespec *timeout)
387{
388 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
389 struct drm_device *dev = obj->dev;
390 bool write = !!(op & ETNA_PREP_WRITE);
391 int ret;
392
393 if (op & ETNA_PREP_NOSYNC) {
394 if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
395 write))
396 return -EBUSY;
397 } else {
398 unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
399
400 ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
401 write, true, remain);
402 if (ret <= 0)
403 return ret == 0 ? -ETIMEDOUT : ret;
404 }
405
406 if (etnaviv_obj->flags & ETNA_BO_CACHED) {
407 if (!etnaviv_obj->sgt) {
408 void *ret;
409
410 mutex_lock(&etnaviv_obj->lock);
411 ret = etnaviv_gem_get_pages(etnaviv_obj);
412 mutex_unlock(&etnaviv_obj->lock);
413 if (IS_ERR(ret))
414 return PTR_ERR(ret);
415 }
416
417 dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
418 etnaviv_obj->sgt->nents,
419 etnaviv_op_to_dma_dir(op));
420 etnaviv_obj->last_cpu_prep_op = op;
421 }
422
423 return 0;
424}
425
/*
 * End a CPU access window opened by etnaviv_gem_cpu_prep(): for cached
 * buffers, hand the pages back to the device using the direction of
 * the matching prep op.
 */
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
442
/* Wait until the GPU has finished using the object, or timeout. */
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	return etnaviv_gpu_wait_obj_inactive(gpu, to_etnaviv_bo(obj), timeout);
}
450
451#ifdef CONFIG_DEBUG_FS
/* debugfs: print one (still unsignaled) fence attached to an object. */
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}
462
/*
 * debugfs: print one object's flags, activity, name, refcount, mmap
 * offset, kernel vaddr and size, followed by its attached fences.
 */
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	/* walk the fence lists under RCU, as the reservation API requires */
	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}
492
493void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
494 struct seq_file *m)
495{
496 struct etnaviv_gem_object *etnaviv_obj;
497 int count = 0;
498 size_t size = 0;
499
500 mutex_lock(&priv->gem_lock);
501 list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
502 struct drm_gem_object *obj = &etnaviv_obj->base;
503
504 seq_puts(m, " ");
505 etnaviv_gem_describe(obj, m);
506 count++;
507 size += obj->size;
508 }
509 mutex_unlock(&priv->gem_lock);
510
511 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
512}
513#endif
514
/* Release a shmem object's kernel mapping and backing pages. */
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}
521
/* Backend ops for ordinary shmem-backed objects. */
static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
};
526
/*
 * Final teardown when the last reference to an object is dropped:
 * unlink it from the device list, unmap all MMU mappings, release the
 * backend's pages and the base GEM object.
 */
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		/* mmu is NULL for reaped (unowned) mappings */
		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	/* only fini the embedded resv, not one borrowed from an import */
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}
558
559int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
560{
561 struct etnaviv_drm_private *priv = dev->dev_private;
562 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
563
564 mutex_lock(&priv->gem_lock);
565 list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
566 mutex_unlock(&priv->gem_lock);
567
568 return 0;
569}
570
571static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
572 struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
573 struct drm_gem_object **obj)
574{
575 struct etnaviv_gem_object *etnaviv_obj;
576 unsigned sz = sizeof(*etnaviv_obj);
577 bool valid = true;
578
579 /* validate flags */
580 switch (flags & ETNA_BO_CACHE_MASK) {
581 case ETNA_BO_UNCACHED:
582 case ETNA_BO_CACHED:
583 case ETNA_BO_WC:
584 break;
585 default:
586 valid = false;
587 }
588
589 if (!valid) {
590 dev_err(dev->dev, "invalid cache flag: %x\n",
591 (flags & ETNA_BO_CACHE_MASK));
592 return -EINVAL;
593 }
594
595 etnaviv_obj = kzalloc(sz, GFP_KERNEL);
596 if (!etnaviv_obj)
597 return -ENOMEM;
598
599 etnaviv_obj->flags = flags;
600 etnaviv_obj->ops = ops;
601 if (robj) {
602 etnaviv_obj->resv = robj;
603 } else {
604 etnaviv_obj->resv = &etnaviv_obj->_resv;
605 reservation_object_init(&etnaviv_obj->_resv);
606 }
607
608 mutex_init(&etnaviv_obj->lock);
609 INIT_LIST_HEAD(&etnaviv_obj->vram_list);
610
611 *obj = &etnaviv_obj->base;
612
613 return 0;
614}
615
/*
 * Create a shmem-backed object of the given (page-aligned) size.
 * Returns the new object with one reference held, or an ERR_PTR.
 */
static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See coments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}
655
/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	/* on success, the handle now owns a reference of its own */
	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
680
681struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
682 u32 size, u32 flags)
683{
684 struct drm_gem_object *obj;
685 int ret;
686
687 obj = __etnaviv_gem_new(dev, size, flags);
688 if (IS_ERR(obj))
689 return obj;
690
691 ret = etnaviv_gem_obj_add(dev, obj);
692 if (ret < 0) {
693 drm_gem_object_unreference_unlocked(obj);
694 return ERR_PTR(ret);
695 }
696
697 return obj;
698}
699
700int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
701 struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
702 struct etnaviv_gem_object **res)
703{
704 struct drm_gem_object *obj;
705 int ret;
706
707 ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
708 if (ret)
709 return ret;
710
711 drm_gem_private_object_init(dev, obj, size);
712
713 *res = to_etnaviv_bo(obj);
714
715 return 0;
716}
717
/*
 * Deferred userptr pinning request: carries references on the target
 * mm and task so the worker can pin pages on the caller's behalf.
 */
struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};
724
/*
 * Pin all user pages backing the object from the given mm/task.
 * Runs in workqueue context with no object locks held.  Returns the
 * pinned page array, or an ERR_PTR after releasing any partial pins.
 */
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	/* get_user_pages() may pin fewer pages than asked; loop until done */
	while (pinned < npages) {
		ret = get_user_pages(task, mm, ptr, npages - pinned,
				     !etnaviv_obj->userptr.ro, 0,
				     pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}
760
/*
 * Workqueue half of userptr pinning: pin the pages, publish the result
 * under the object lock (userptr.work doubles as the completion/error
 * marker), then drop the references taken when the work was queued.
 */
static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		/* leave the error for the next get_pages() call to collect */
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}
784
/*
 * get_pages backend for userptr objects.  Fast path: when called from
 * the owning process, try __get_user_pages_fast().  Otherwise (or if
 * only partially pinned) hand the job to a workqueue item that may
 * sleep on mmap_sem, and return -EAGAIN so the caller retries later.
 * Called with etnaviv_obj->lock held.
 */
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	/* a previous call already queued work: report its outcome */
	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	/*
	 * NOTE(review): get_task_mm() can return NULL if the owning
	 * task has exited; the deferred path below would then call
	 * mmput(NULL) in the worker - confirm this cannot happen here.
	 */
	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	/* partial pin: drop it and fall back to the deferred slow path */
	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	/* the worker drops these task and object references */
	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}
849
/*
 * Release backend for userptr objects: unmap and free the sg table,
 * unpin the user pages, and drop the task reference taken at creation.
 */
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}
865
/* Backend ops for objects backed by pinned user memory. */
static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
};
870
/*
 * Create a userptr object wrapping [ptr, ptr + size) of the calling
 * task's address space and return a userspace handle to it.  The
 * object keeps a reference on the current task until release.
 */
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	/* userptr objects are always CPU-cached */
	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
new file mode 100644
index 000000000000..a300b4b3d545
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ETNAVIV_GEM_H__
18#define __ETNAVIV_GEM_H__
19
20#include <linux/reservation.h>
21#include "etnaviv_drv.h"
22
23struct etnaviv_gem_ops;
24struct etnaviv_gem_object;
25
/* State for a BO backed by user memory (see etnaviv_gem_new_userptr()). */
struct etnaviv_gem_userptr {
	uintptr_t ptr;			/* userspace address of the memory */
	struct task_struct *task;	/* creating task (referenced) */
	struct work_struct *work;	/* in-flight get_pages work, if any */
	bool ro;			/* read-only: !ETNA_USERPTR_WRITE */
};
32
/*
 * One mapping of an object into a GPU MMU; objects keep these on
 * their vram_list.
 */
struct etnaviv_vram_mapping {
	struct list_head obj_node;	/* link in the object's vram_list */
	struct list_head scan_node;
	struct list_head mmu_node;
	struct etnaviv_gem_object *object;
	struct etnaviv_iommu *mmu;	/* MMU this mapping belongs to */
	struct drm_mm_node vram_node;
	unsigned int use;		/* use count - TODO confirm semantics */
	u32 iova;			/* GPU virtual address */
};
43
/* Driver-private GEM object wrapping the core drm_gem_object. */
struct etnaviv_gem_object {
	struct drm_gem_object base;
	const struct etnaviv_gem_ops *ops;	/* backing-store hooks */
	struct mutex lock;	/* guards page setup (see prime pin/unpin) */

	u32 flags;		/* ETNA_BO_* creation flags */

	struct list_head gem_node;
	struct etnaviv_gpu *gpu;     /* non-null if active */
	atomic_t gpu_active;	/* outstanding GPU work; see is_active() */
	u32 access;

	struct page **pages;	/* backing pages */
	struct sg_table *sgt;	/* scatterlist for DMA / prime export */
	void *vaddr;		/* kernel mapping, if any */

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	struct list_head vram_list;	/* struct etnaviv_vram_mapping list */

	/* cache maintenance */
	u32 last_cpu_prep_op;

	struct etnaviv_gem_userptr userptr;	/* valid for userptr BOs only */
};
71
/* Convert a core GEM object pointer to its containing etnaviv object. */
static inline
struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct etnaviv_gem_object, base);
}
77
/* Per-BO-type backing store hooks (shmem, userptr, prime). */
struct etnaviv_gem_ops {
	int (*get_pages)(struct etnaviv_gem_object *);
	void (*release)(struct etnaviv_gem_object *);
};
82
/* True while the GPU still has outstanding work against this object. */
static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
{
	return atomic_read(&etnaviv_obj->gpu_active) != 0;
}
87
88#define MAX_CMDS 4
89
90/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
91 * associated with the cmdstream submission for synchronization (and
92 * make it easier to unwind when things go wrong, etc). This only
93 * lasts for the duration of the submit-ioctl.
94 */
95struct etnaviv_gem_submit {
96 struct drm_device *dev;
97 struct etnaviv_gpu *gpu;
98 struct ww_acquire_ctx ticket;
99 u32 fence;
100 unsigned int nr_bos;
101 struct {
102 u32 flags;
103 struct etnaviv_gem_object *obj;
104 u32 iova;
105 } bos[0];
106};
107
108int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
109 struct timespec *timeout);
110int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
111 struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
112 struct etnaviv_gem_object **res);
113int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
114struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
115void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
116
117#endif /* __ETNAVIV_GEM_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
new file mode 100644
index 000000000000..e94db4f95770
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/dma-buf.h>
19#include "etnaviv_drv.h"
20#include "etnaviv_gem.h"
21
22
/* dma-buf export: hand out the object's scatter/gather table. */
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */

	return etnaviv_obj->sgt;
}
31
/* dma-buf export: kernel virtual address of the object's contents. */
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
{
	return etnaviv_gem_vaddr(obj);
}
36
/* dma-buf export: vmap counterpart - intentionally a no-op for now. */
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* TODO msm_gem_vunmap() */
}
41
42int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
43{
44 if (!obj->import_attach) {
45 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
46
47 mutex_lock(&etnaviv_obj->lock);
48 etnaviv_gem_get_pages(etnaviv_obj);
49 mutex_unlock(&etnaviv_obj->lock);
50 }
51 return 0;
52}
53
54void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
55{
56 if (!obj->import_attach) {
57 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
58
59 mutex_lock(&etnaviv_obj->lock);
60 etnaviv_gem_put_pages(to_etnaviv_bo(obj));
61 mutex_unlock(&etnaviv_obj->lock);
62 }
63}
64
65static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
66{
67 if (etnaviv_obj->vaddr)
68 dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
69 etnaviv_obj->vaddr);
70
71 /* Don't drop the pages for imported dmabuf, as they are not
72 * ours, just free the array we allocated:
73 */
74 if (etnaviv_obj->pages)
75 drm_free_large(etnaviv_obj->pages);
76
77 drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
78}
79
/* Backing-store hooks for imported dma-buf objects. */
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
	/* .get_pages should never be called */
	.release = etnaviv_gem_prime_release,
};
84
/*
 * Import a dma-buf's sg_table as a new etnaviv GEM object. A page
 * array is rebuilt from the sg_table so the rest of the driver can
 * treat the object like a native one; the pages themselves stay owned
 * by the exporter (see etnaviv_gem_prime_release()).
 */
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	struct etnaviv_gem_object *etnaviv_obj;
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	int ret, npages;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
				      attach->dmabuf->resv,
				      &etnaviv_gem_prime_ops, &etnaviv_obj);
	if (ret < 0)
		return ERR_PTR(ret);

	npages = size / PAGE_SIZE;

	etnaviv_obj->sgt = sgt;
	etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!etnaviv_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
					       NULL, npages);
	if (ret)
		goto fail;

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto fail;

	return &etnaviv_obj->base;

fail:
	/* dropping the last reference runs etnaviv_gem_prime_release() */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
new file mode 100644
index 000000000000..1aba01a999df
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -0,0 +1,443 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/reservation.h>
18#include "etnaviv_drv.h"
19#include "etnaviv_gpu.h"
20#include "etnaviv_gem.h"
21
22/*
23 * Cmdstream submission:
24 */
25
26#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
27/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
28#define BO_LOCKED 0x4000
29#define BO_PINNED 0x2000
30
31static inline void __user *to_user_ptr(u64 address)
32{
33 return (void __user *)(uintptr_t)address;
34}
35
36static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
37 struct etnaviv_gpu *gpu, size_t nr)
38{
39 struct etnaviv_gem_submit *submit;
40 size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
41
42 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
43 if (submit) {
44 submit->dev = dev;
45 submit->gpu = gpu;
46
47 /* initially, until copy_from_user() and bo lookup succeeds: */
48 submit->nr_bos = 0;
49
50 ww_acquire_init(&submit->ticket, &reservation_ww_class);
51 }
52
53 return submit;
54}
55
/*
 * Resolve the user-supplied BO descriptor array into object pointers,
 * taking a reference on each object. The whole lookup is done in bulk
 * under table_lock rather than per-handle drm_gem_object_lookup().
 *
 * submit->nr_bos is always set to the number of slots actually filled,
 * so submit_cleanup() works correctly after a partial failure.
 */
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_reference(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}
103
104static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
105{
106 if (submit->bos[i].flags & BO_LOCKED) {
107 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
108
109 ww_mutex_unlock(&etnaviv_obj->resv->lock);
110 submit->bos[i].flags &= ~BO_LOCKED;
111 }
112}
113
/*
 * Lock the reservation object of every BO in the submit using the
 * wound/wait mutex protocol, so concurrent submitters cannot deadlock.
 *
 * On -EDEADLK every lock taken so far is dropped, the contended lock is
 * acquired with the blocking "slow" variant under the same ticket, and
 * the whole loop is retried; "slow_locked" remembers that pre-locked
 * index so the retry pass does not try to lock it twice.
 */
static int submit_lock_objects(struct etnaviv_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		/* remember the index we failed on, for the -EDEADLK path */
		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
							  &submit->ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	/* the slow-locked BO may lie beyond i and needs its own unlock */
	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct etnaviv_gem_object *etnaviv_obj;

		etnaviv_obj = submit->bos[contended].obj;

		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
						       &submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
167
168static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
169{
170 unsigned int context = submit->gpu->fence_context;
171 int i, ret = 0;
172
173 for (i = 0; i < submit->nr_bos; i++) {
174 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
175 bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
176
177 ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
178 if (ret)
179 break;
180 }
181
182 return ret;
183}
184
/* Drop the iova pins taken in submit_pin_objects() and clear the state. */
static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		if (submit->bos[i].flags & BO_PINNED)
			etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);

		submit->bos[i].iova = 0;
		submit->bos[i].flags &= ~BO_PINNED;
	}
}
199
/*
 * Pin each BO into the GPU address space and record its iova.
 * On failure the BOs pinned so far keep their BO_PINNED flag and are
 * released by submit_unpin_objects() later.
 */
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		u32 iova;

		ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
					   &iova);
		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].iova = iova;
	}

	return ret;
}
219
220static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
221 struct etnaviv_gem_object **obj, u32 *iova)
222{
223 if (idx >= submit->nr_bos) {
224 DRM_ERROR("invalid buffer index: %u (out of %u)\n",
225 idx, submit->nr_bos);
226 return -EINVAL;
227 }
228
229 if (obj)
230 *obj = submit->bos[idx].obj;
231 if (iova)
232 *iova = submit->bos[idx].iova;
233
234 return 0;
235}
236
237/* process the reloc's and patch up the cmdstream as needed: */
238static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
239 u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
240 u32 nr_relocs)
241{
242 u32 i, last_offset = 0;
243 u32 *ptr = stream;
244 int ret;
245
246 for (i = 0; i < nr_relocs; i++) {
247 const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
248 struct etnaviv_gem_object *bobj;
249 u32 iova, off;
250
251 if (unlikely(r->flags)) {
252 DRM_ERROR("invalid reloc flags\n");
253 return -EINVAL;
254 }
255
256 if (r->submit_offset % 4) {
257 DRM_ERROR("non-aligned reloc offset: %u\n",
258 r->submit_offset);
259 return -EINVAL;
260 }
261
262 /* offset in dwords: */
263 off = r->submit_offset / 4;
264
265 if ((off >= size ) ||
266 (off < last_offset)) {
267 DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
268 return -EINVAL;
269 }
270
271 ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
272 if (ret)
273 return ret;
274
275 if (r->reloc_offset >=
276 bobj->base.size - sizeof(*ptr)) {
277 DRM_ERROR("relocation %u outside object", i);
278 return -EINVAL;
279 }
280
281 ptr[off] = iova + r->reloc_offset;
282
283 last_offset = off;
284 }
285
286 return 0;
287}
288
/*
 * Undo submit_lookup_objects()/submit_lock_objects(): unlock each BO,
 * drop its reference, finish the ww ticket and free the submit itself.
 */
static void submit_cleanup(struct etnaviv_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		submit_unlock_object(submit, i);
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
	kfree(submit);
}
303
/*
 * Main submit ioctl: copy in the user's command stream, BO list and
 * relocations, validate everything, lock/pin the BOs, patch the stream
 * and hand the resulting command buffer to the GPU.
 *
 * Returns 0 with args->fence set on success, or a negative errno.
 */
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_cmdbuf *cmdbuf;
	struct etnaviv_gpu *gpu;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
	stream = drm_malloc_ab(1, args->stream_size);
	/* the cmdbuf is sized for the stream plus alignment/link space */
	cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
					args->nr_bos);
	if (!bos || !relocs || !stream || !cmdbuf) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	cmdbuf->exec_state = args->exec_state;
	cmdbuf->ctx = file->driver_priv;

	ret = copy_from_user(bos, to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	ret = submit_lock_objects(submit);
	if (ret)
		goto err_submit_objects;

	/* reject command streams the validator does not accept */
	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto out;

	memcpy(cmdbuf->vaddr, stream, args->stream_size);
	cmdbuf->user_size = ALIGN(args->stream_size, 8);

	/* on success the GPU takes ownership of cmdbuf */
	ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
	if (ret == 0)
		cmdbuf = NULL;

	args->fence = submit->fence;

out:
	submit_unpin_objects(submit);

	/*
	 * If we're returning -EAGAIN, it may be due to the userptr code
	 * wanting to run its workqueue outside of any locks. Flush our
	 * workqueue to ensure that it is run in a timely manner.
	 */
	if (ret == -EAGAIN)
		flush_workqueue(priv->wq);

err_submit_objects:
	submit_cleanup(submit);

err_submit_cmds:
	/* if we still own the cmdbuf */
	if (cmdbuf)
		etnaviv_gpu_cmdbuf_free(cmdbuf);
	if (stream)
		drm_free_large(stream);
	if (bos)
		drm_free_large(bos);
	if (relocs)
		drm_free_large(relocs);

	return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
new file mode 100644
index 000000000000..056a72e6ed26
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -0,0 +1,1647 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/component.h>
18#include <linux/fence.h>
19#include <linux/moduleparam.h>
20#include <linux/of_device.h>
21#include "etnaviv_dump.h"
22#include "etnaviv_gpu.h"
23#include "etnaviv_gem.h"
24#include "etnaviv_mmu.h"
25#include "etnaviv_iommu.h"
26#include "etnaviv_iommu_v2.h"
27#include "common.xml.h"
28#include "state.xml.h"
29#include "state_hi.xml.h"
30#include "cmdstream.xml.h"
31
/* platform_device ID table for matching the GPU core device. */
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ /* sentinel */ },
};
36
/*
 * Module parameter "dump_core" (default true, root read/write via 0600);
 * enables dumping of GPU core state (see etnaviv_dump.h users - confirm
 * exact trigger).
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
39
40/*
41 * Driver functions:
42 */
43
/*
 * Read one identity/limit value of the GPU.
 * Returns 0 and fills *value, or -EINVAL for an unknown param.
 */
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}
122
/*
 * Read the HI_CHIP_SPECS registers (where available) into gpu->identity,
 * then substitute model-specific defaults for any field the hardware
 * left at zero and expand the fields stored as log2 (register_max,
 * thread_count, vertex_output_buffer_size) into real values.
 */
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[2];

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);

		gpu->identity.stream_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
		gpu->identity.register_max =
			(specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
				>> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
		gpu->identity.thread_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
		gpu->identity.vertex_cache_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
		gpu->identity.shader_core_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
		gpu->identity.pixel_pipes =
			(specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
				>> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
		gpu->identity.vertex_output_buffer_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;

		gpu->identity.buffer_size =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
		gpu->identity.instruction_count =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
		gpu->identity.num_constants =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
				>> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == 0x0500 ||
		 gpu->identity.model == 0x0530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == 0x0400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if ((gpu->identity.model == 0x2000 &&
		     gpu->identity.revision == 0x5108) ||
		    gpu->identity.model == 0x880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;
}
246
/*
 * Determine the model, revision and feature words of the GPU, applying
 * the known quirks for old and misreported cores, then derive the idle
 * mask and hand off to etnaviv_hw_specs() for the limit registers.
 */
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
	     >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) {
		gpu->identity.model = 0x500; /* gc500 */
		gpu->identity.revision =
			(chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
			>> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT;
	} else {

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != 0x0420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (gpu->identity.model == 0x300 &&
		    gpu->identity.revision == 0x2201) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == 0x700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
	    (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}
342
/*
 * Write the clock control register twice: first with FSCALE_CMD_LOAD
 * set, then cleared - the LOAD bit appears to commit the new frequency
 * scale value (see state_hi.xml for the register definition).
 */
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
349
/*
 * Soft-reset the GPU, retrying for up to one second until both the FE
 * and the 2D/3D engines report idle. Returns -EBUSY if the GPU never
 * comes back; on success re-programs the clock and returns 0.
 */
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* TODO
	 *
	 * - clock gating
	 * - pulse eater
	 * - what about VG?
	 */

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock. Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if the FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}
439
/*
 * Program static hardware state after reset: errata workarounds, AXI
 * cache attributes, memory base addresses and the MMU page tables,
 * then enable interrupts and kick off the FE on gpu->buffer.
 */
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	/* GC320 errata workaround for specific revisions */
	if (gpu->identity.model == chipModel_GC320 &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
	    (gpu->identity.revision == 0x5007 ||
	     gpu->identity.revision == 0x5220)) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	/* unmask all interrupts, then point the FE at the ring buffer */
	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
498
499int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
500{
501 int ret, i;
502 struct iommu_domain *iommu;
503 enum etnaviv_iommu_version version;
504 bool mmuv2;
505
506 ret = pm_runtime_get_sync(gpu->dev);
507 if (ret < 0)
508 return ret;
509
510 etnaviv_hw_identify(gpu);
511
512 if (gpu->identity.model == 0) {
513 dev_err(gpu->dev, "Unknown GPU model\n");
514 pm_runtime_put_autosuspend(gpu->dev);
515 return -ENXIO;
516 }
517
518 ret = etnaviv_hw_reset(gpu);
519 if (ret)
520 goto fail;
521
522 /* Setup IOMMU.. eventually we will (I think) do this once per context
523 * and have separate page tables per context. For now, to keep things
524 * simple and to get something working, just use a single address space:
525 */
526 mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
527 dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
528
529 if (!mmuv2) {
530 iommu = etnaviv_iommu_domain_alloc(gpu);
531 version = ETNAVIV_IOMMU_V1;
532 } else {
533 iommu = etnaviv_iommu_v2_domain_alloc(gpu);
534 version = ETNAVIV_IOMMU_V2;
535 }
536
537 if (!iommu) {
538 ret = -ENOMEM;
539 goto fail;
540 }
541
542 /* TODO: we will leak here memory - fix it! */
543
544 gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
545 if (!gpu->mmu) {
546 ret = -ENOMEM;
547 goto fail;
548 }
549
550 /* Create buffer: */
551 gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
552 if (!gpu->buffer) {
553 ret = -ENOMEM;
554 dev_err(gpu->dev, "could not create command buffer\n");
555 goto fail;
556 }
557 if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
558 ret = -EINVAL;
559 dev_err(gpu->dev,
560 "command buffer outside valid memory window\n");
561 goto free_buffer;
562 }
563
564 /* Setup event management */
565 spin_lock_init(&gpu->event_spinlock);
566 init_completion(&gpu->event_free);
567 for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
568 gpu->event[i].used = false;
569 complete(&gpu->event_free);
570 }
571
572 /* Now program the hardware */
573 mutex_lock(&gpu->lock);
574 etnaviv_gpu_hw_init(gpu);
575 mutex_unlock(&gpu->lock);
576
577 pm_runtime_mark_last_busy(gpu->dev);
578 pm_runtime_put_autosuspend(gpu->dev);
579
580 return 0;
581
582free_buffer:
583 etnaviv_gpu_cmdbuf_free(gpu->buffer);
584 gpu->buffer = NULL;
585fail:
586 pm_runtime_mark_last_busy(gpu->dev);
587 pm_runtime_put_autosuspend(gpu->dev);
588
589 return ret;
590}
591
592#ifdef CONFIG_DEBUG_FS
/*
 * Two snapshots of the FE DMA address/state registers, taken some time
 * apart, used to tell whether the front-end DMA engine is making progress.
 */
struct dma_debug {
	u32 address[2];	/* [0] = first sample, [1] = latest sample */
	u32 state[2];	/* FE DMA debug state at the same two points */
};
597
598static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
599{
600 u32 i;
601
602 debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
603 debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
604
605 for (i = 0; i < 500; i++) {
606 debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
607 debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
608
609 if (debug->address[0] != debug->address[1])
610 break;
611
612 if (debug->state[0] != debug->state[1])
613 break;
614 }
615}
616
617int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
618{
619 struct dma_debug debug;
620 u32 dma_lo, dma_hi, axi, idle;
621 int ret;
622
623 seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
624
625 ret = pm_runtime_get_sync(gpu->dev);
626 if (ret < 0)
627 return ret;
628
629 dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
630 dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
631 axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
632 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
633
634 verify_dma(gpu, &debug);
635
636 seq_puts(m, "\tfeatures\n");
637 seq_printf(m, "\t minor_features0: 0x%08x\n",
638 gpu->identity.minor_features0);
639 seq_printf(m, "\t minor_features1: 0x%08x\n",
640 gpu->identity.minor_features1);
641 seq_printf(m, "\t minor_features2: 0x%08x\n",
642 gpu->identity.minor_features2);
643 seq_printf(m, "\t minor_features3: 0x%08x\n",
644 gpu->identity.minor_features3);
645
646 seq_puts(m, "\tspecs\n");
647 seq_printf(m, "\t stream_count: %d\n",
648 gpu->identity.stream_count);
649 seq_printf(m, "\t register_max: %d\n",
650 gpu->identity.register_max);
651 seq_printf(m, "\t thread_count: %d\n",
652 gpu->identity.thread_count);
653 seq_printf(m, "\t vertex_cache_size: %d\n",
654 gpu->identity.vertex_cache_size);
655 seq_printf(m, "\t shader_core_count: %d\n",
656 gpu->identity.shader_core_count);
657 seq_printf(m, "\t pixel_pipes: %d\n",
658 gpu->identity.pixel_pipes);
659 seq_printf(m, "\t vertex_output_buffer_size: %d\n",
660 gpu->identity.vertex_output_buffer_size);
661 seq_printf(m, "\t buffer_size: %d\n",
662 gpu->identity.buffer_size);
663 seq_printf(m, "\t instruction_count: %d\n",
664 gpu->identity.instruction_count);
665 seq_printf(m, "\t num_constants: %d\n",
666 gpu->identity.num_constants);
667
668 seq_printf(m, "\taxi: 0x%08x\n", axi);
669 seq_printf(m, "\tidle: 0x%08x\n", idle);
670 idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
671 if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
672 seq_puts(m, "\t FE is not idle\n");
673 if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
674 seq_puts(m, "\t DE is not idle\n");
675 if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
676 seq_puts(m, "\t PE is not idle\n");
677 if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
678 seq_puts(m, "\t SH is not idle\n");
679 if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
680 seq_puts(m, "\t PA is not idle\n");
681 if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
682 seq_puts(m, "\t SE is not idle\n");
683 if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
684 seq_puts(m, "\t RA is not idle\n");
685 if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
686 seq_puts(m, "\t TX is not idle\n");
687 if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
688 seq_puts(m, "\t VG is not idle\n");
689 if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
690 seq_puts(m, "\t IM is not idle\n");
691 if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
692 seq_puts(m, "\t FP is not idle\n");
693 if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
694 seq_puts(m, "\t TS is not idle\n");
695 if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
696 seq_puts(m, "\t AXI low power mode\n");
697
698 if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
699 u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
700 u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
701 u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
702
703 seq_puts(m, "\tMC\n");
704 seq_printf(m, "\t read0: 0x%08x\n", read0);
705 seq_printf(m, "\t read1: 0x%08x\n", read1);
706 seq_printf(m, "\t write: 0x%08x\n", write);
707 }
708
709 seq_puts(m, "\tDMA ");
710
711 if (debug.address[0] == debug.address[1] &&
712 debug.state[0] == debug.state[1]) {
713 seq_puts(m, "seems to be stuck\n");
714 } else if (debug.address[0] == debug.address[1]) {
715 seq_puts(m, "adress is constant\n");
716 } else {
717 seq_puts(m, "is runing\n");
718 }
719
720 seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
721 seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
722 seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
723 seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
724 seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
725 dma_lo, dma_hi);
726
727 ret = 0;
728
729 pm_runtime_mark_last_busy(gpu->dev);
730 pm_runtime_put_autosuspend(gpu->dev);
731
732 return ret;
733}
734#endif
735
736/*
737 * Power Management:
738 */
739static int enable_clk(struct etnaviv_gpu *gpu)
740{
741 if (gpu->clk_core)
742 clk_prepare_enable(gpu->clk_core);
743 if (gpu->clk_shader)
744 clk_prepare_enable(gpu->clk_shader);
745
746 return 0;
747}
748
/*
 * Disable the (optional) core and shader clocks, in the same order they
 * were enabled.  clk_disable_unprepare() cannot fail, so this always
 * returns 0; the int return matches the enable_clk() counterpart.
 */
static int disable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);

	return 0;
}
758
759static int enable_axi(struct etnaviv_gpu *gpu)
760{
761 if (gpu->clk_bus)
762 clk_prepare_enable(gpu->clk_bus);
763
764 return 0;
765}
766
/*
 * Disable the (optional) AXI bus clock.  Always returns 0; the int
 * return keeps the signature symmetric with enable_axi().
 */
static int disable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}
774
775/*
776 * Hangcheck detection for locked gpu:
777 */
/*
 * Hangcheck recovery worker: after a detected lockup, optionally dump the
 * core, reset the GPU, complete all outstanding events (the hardware will
 * never signal them after the reset, and each holds a runtime PM
 * reference), re-initialise the hardware state and kick off retirement.
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	/* We can't recover a powered-down GPU; bail silently. */
	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
		/*
		 * Decrement the PM count for each stuck event. This is safe
		 * even in atomic context as we use ASYNC RPM here.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	/* No interrupts will arrive for the lost events; mark them done. */
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	/* Force a full context restore on the next submission. */
	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}
828
829static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
830{
831 DBG("%s", dev_name(gpu->dev));
832 mod_timer(&gpu->hangcheck_timer,
833 round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
834}
835
/*
 * Hangcheck timer callback.  Declares progress if either the completed
 * fence advanced since the last check or the FE DMA address moved by more
 * than a small idle-loop-sized step; otherwise, if work is still
 * outstanding, schedules the recovery worker.  Re-arms itself while
 * unfinished work remains.
 */
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		/* A change of more than 16 bytes means the FE left the small
		 * WAIT/LINK loop and is executing real work. */
		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}
869
/*
 * Stop hang detection: kill the timer and wait for any in-flight
 * recovery work to finish.  Used on unbind.
 */
static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}
875
876/* fence object management */
/* Driver fence: a struct fence plus a backpointer to the owning GPU. */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;	/* GPU this fence was emitted on */
	struct fence base;		/* embedded generic fence */
};
881
/* Convert an embedded struct fence back to its etnaviv_fence container. */
static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}
886
/* fence_ops: driver name reported for all etnaviv fences. */
static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}
891
892static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
893{
894 struct etnaviv_fence *f = to_etnaviv_fence(fence);
895
896 return dev_name(f->gpu->dev);
897}
898
/* fence_ops: signalling is driven by the GPU interrupt, always enabled. */
static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}
903
904static bool etnaviv_fence_signaled(struct fence *fence)
905{
906 struct etnaviv_fence *f = to_etnaviv_fence(fence);
907
908 return fence_completed(f->gpu, f->base.seqno);
909}
910
/*
 * fence_ops: free the fence.  Uses kfree_rcu() via the embedded fence's
 * rcu head, since fences may still be accessed under RCU by waiters.
 */
static void etnaviv_fence_release(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}
917
/* struct fence callbacks for etnaviv's interrupt-signalled fences. */
static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};
926
927static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
928{
929 struct etnaviv_fence *f;
930
931 f = kzalloc(sizeof(*f), GFP_KERNEL);
932 if (!f)
933 return NULL;
934
935 f->gpu = gpu;
936
937 fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
938 gpu->fence_context, ++gpu->next_fence);
939
940 return &f->base;
941}
942
/*
 * Synchronise a submission against the fences already attached to a GEM
 * object's reservation.  For a shared (read) access, also reserve a slot
 * for our own shared fence.  Fences belonging to our own context are
 * skipped.  Waits are interruptible; returns 0 or a negative errno.
 * Caller must hold the reservation object's lock.
 */
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	/* Readers don't need to wait for other readers. */
	if (!exclusive || !fobj)
		return 0;

	/* Exclusive access: wait for every foreign shared fence. */
	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
987
988/*
989 * event management:
990 */
991
992static unsigned int event_alloc(struct etnaviv_gpu *gpu)
993{
994 unsigned long ret, flags;
995 unsigned int i, event = ~0U;
996
997 ret = wait_for_completion_timeout(&gpu->event_free,
998 msecs_to_jiffies(10 * 10000));
999 if (!ret)
1000 dev_err(gpu->dev, "wait_for_completion_timeout failed");
1001
1002 spin_lock_irqsave(&gpu->event_spinlock, flags);
1003
1004 /* find first free event */
1005 for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
1006 if (gpu->event[i].used == false) {
1007 gpu->event[i].used = true;
1008 event = i;
1009 break;
1010 }
1011 }
1012
1013 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1014
1015 return event;
1016}
1017
1018static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1019{
1020 unsigned long flags;
1021
1022 spin_lock_irqsave(&gpu->event_spinlock, flags);
1023
1024 if (gpu->event[event].used == false) {
1025 dev_warn(gpu->dev, "event %u is already marked as free",
1026 event);
1027 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1028 } else {
1029 gpu->event[event].used = false;
1030 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1031
1032 complete(&gpu->event_free);
1033 }
1034}
1035
1036/*
1037 * Cmdstream submission/retirement:
1038 */
1039
1040struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
1041 size_t nr_bos)
1042{
1043 struct etnaviv_cmdbuf *cmdbuf;
1044 size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
1045 sizeof(*cmdbuf));
1046
1047 cmdbuf = kzalloc(sz, GFP_KERNEL);
1048 if (!cmdbuf)
1049 return NULL;
1050
1051 cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
1052 GFP_KERNEL);
1053 if (!cmdbuf->vaddr) {
1054 kfree(cmdbuf);
1055 return NULL;
1056 }
1057
1058 cmdbuf->gpu = gpu;
1059 cmdbuf->size = size;
1060
1061 return cmdbuf;
1062}
1063
/* Free a command buffer's DMA memory and its descriptor. */
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
			      cmdbuf->vaddr, cmdbuf->paddr);
	kfree(cmdbuf);
}
1070
/*
 * Retirement worker: free all command buffers whose fences have signalled,
 * dropping each BO's gpu_active count and the iova reference taken at
 * submit time.  The active list is fence-ordered, so we stop at the first
 * unsignalled entry.  Wakes anyone waiting in the fence/object wait paths.
 */
static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
	}

	/* Record the fence value we retired up to (snapshot from entry). */
	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}
1104
/*
 * Wait (interruptibly) for a fence seqno to complete.
 * With a NULL timeout this is a non-blocking poll (-EBUSY if unfinished).
 * Returns 0 on completion, -ETIMEDOUT, -ERESTARTSYS on signal, or -EINVAL
 * for a seqno that was never issued.
 */
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
				fence, gpu->retired_fence,
				gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			/* Positive return = completed within the timeout. */
			ret = 0;
		}
	}

	return ret;
}
1137
1138/*
1139 * Wait for an object to become inactive. This, on it's own, is not race
1140 * free: the object is moved by the retire worker off the active list, and
1141 * then the iova is put. Moreover, the object could be re-submitted just
1142 * after we notice that it's become inactive.
1143 *
1144 * Although the retirement happens under the gpu lock, we don't want to hold
1145 * that lock in this function while waiting.
1146 */
1147int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1148 struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
1149{
1150 unsigned long remaining;
1151 long ret;
1152
1153 if (!timeout)
1154 return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1155
1156 remaining = etnaviv_timeout_to_jiffies(timeout);
1157
1158 ret = wait_event_interruptible_timeout(gpu->fence_event,
1159 !is_active(etnaviv_obj),
1160 remaining);
1161 if (ret > 0) {
1162 struct etnaviv_drm_private *priv = gpu->drm->dev_private;
1163
1164 /* Synchronise with the retire worker */
1165 flush_workqueue(priv->wq);
1166 return 0;
1167 } else if (ret == -ERESTARTSYS) {
1168 return -ERESTARTSYS;
1169 } else {
1170 return -ETIMEDOUT;
1171 }
1172}
1173
/* Take a synchronous runtime PM reference on the GPU device. */
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}
1178
/* Drop a runtime PM reference, restarting the autosuspend timer. */
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}
1184
/* add bo's to gpu's ring, and kick gpu: */
/*
 * Queue a command buffer for execution: allocate an event slot and fence,
 * flag a context switch if needed, emit the commands, attach the fence to
 * every BO's reservation and take per-BO iova references (dropped in
 * retire_worker).  Holds gpu->lock across the queueing and takes an extra
 * no-resume PM reference per submission (balanced in the IRQ handler).
 * Returns 0 or a negative errno.
 */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	/*
	 * TODO
	 *
	 * - flush
	 * - data endian
	 * - prefetch
	 *
	 */

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* The IRQ handler picks the fence up via the event slot. */
	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	/* Different submitting context: flush the MMU and restore state. */
	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		u32 iova;

		/* Each cmdbuf takes a refcount on the iova */
		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
		cmdbuf->bo[i] = etnaviv_obj;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	etnaviv_gpu_pm_put(gpu);

	return ret;
}
1267
1268/*
1269 * Init/Cleanup:
1270 */
/*
 * GPU interrupt handler: acknowledge pending interrupts, signal the fence
 * behind each completed event bit, track the highest completed seqno
 * (events may complete out of order), release the event slots and their
 * per-submission PM references, and schedule retirement.
 */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	/* Reading the register acknowledges the pending interrupt bits. */
	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		/* Each remaining set bit is one completed event slot. */
		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);

			/*
			 * We need to balance the runtime PM count caused by
			 * each submission.  Upon submission, we increment
			 * the runtime PM counter, and allocate one event.
			 * So here, we put the runtime PM count for each
			 * completed event.
			 */
			pm_runtime_put_autosuspend(gpu->dev);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}
1335
/*
 * Enable the core/shader clocks followed by the AXI bus clock, unwinding
 * the former if the latter fails.  Returns 0 or a negative errno.
 */
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret = enable_clk(gpu);

	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		disable_clk(gpu);

	return ret;
}
1352
/*
 * Disable the AXI bus clock, then the core/shader clocks (reverse of
 * enable order).  Returns the first error encountered, or 0.
 */
static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = disable_axi(gpu);
	if (!ret)
		ret = disable_clk(gpu);

	return ret;
}
1367
/*
 * Quiesce the GPU for suspend: terminate the FE's WAIT/LINK loop with an
 * END command, busy-wait (bounded to ~100ms) for the relevant units to go
 * idle, then gate the clocks.  Returns the result of the clock disable.
 */
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		unsigned long timeout;

		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		timeout = jiffies + msecs_to_jiffies(100);
		do {
			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

			if ((idle & gpu->idle_mask) == gpu->idle_mask)
				break;

			if (time_is_before_jiffies(timeout)) {
				dev_warn(gpu->dev,
					 "timed out waiting for idle: idle=0x%x\n",
					 idle);
				break;
			}

			udelay(5);
		} while (1);
	}

	return etnaviv_gpu_clk_disable(gpu);
}
1401
1402#ifdef CONFIG_PM
/*
 * Re-program the hardware after a runtime resume: restore the clock
 * divider, run the full hardware init sequence and force a context
 * restore on the next submission.  Returns 0 or -EINTR if the lock
 * acquisition was killed.
 */
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	/* Same clock setup as etnaviv_hw_reset() ends with. */
	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);

	return 0;
}
1424#endif
1425
/*
 * Component bind callback: power up the GPU (via runtime PM when
 * available, direct clock enable otherwise), initialise the fence
 * context, work items, hangcheck timer, and register this GPU with the
 * DRM device's private data.
 */
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	/* NOTE(review): no bounds check against the priv->gpu array size —
	 * assumes the DT can't bind more GPUs than slots; verify. */
	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}
1461
/*
 * Component unbind callback: stop hang detection, suspend the hardware
 * (through runtime PM when available) and release the ring buffer and
 * MMU resources.
 */
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}
1490
/* Component callbacks tying this GPU into the master etnaviv DRM device. */
static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};
1495
/* Device tree match table: any Vivante GC core. */
static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
1502
1503static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1504{
1505 struct device *dev = &pdev->dev;
1506 struct etnaviv_gpu *gpu;
1507 int err = 0;
1508
1509 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1510 if (!gpu)
1511 return -ENOMEM;
1512
1513 gpu->dev = &pdev->dev;
1514 mutex_init(&gpu->lock);
1515
1516 /*
1517 * Set the GPU base address to the start of physical memory. This
1518 * ensures that if we have up to 2GB, the v1 MMU can address the
1519 * highest memory. This is important as command buffers may be
1520 * allocated outside of this limit.
1521 */
1522 gpu->memory_base = PHYS_OFFSET;
1523
1524 /* Map registers: */
1525 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1526 if (IS_ERR(gpu->mmio))
1527 return PTR_ERR(gpu->mmio);
1528
1529 /* Get Interrupt: */
1530 gpu->irq = platform_get_irq(pdev, 0);
1531 if (gpu->irq < 0) {
1532 err = gpu->irq;
1533 dev_err(dev, "failed to get irq: %d\n", err);
1534 goto fail;
1535 }
1536
1537 err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1538 dev_name(gpu->dev), gpu);
1539 if (err) {
1540 dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1541 goto fail;
1542 }
1543
1544 /* Get Clocks: */
1545 gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1546 DBG("clk_bus: %p", gpu->clk_bus);
1547 if (IS_ERR(gpu->clk_bus))
1548 gpu->clk_bus = NULL;
1549
1550 gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1551 DBG("clk_core: %p", gpu->clk_core);
1552 if (IS_ERR(gpu->clk_core))
1553 gpu->clk_core = NULL;
1554
1555 gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1556 DBG("clk_shader: %p", gpu->clk_shader);
1557 if (IS_ERR(gpu->clk_shader))
1558 gpu->clk_shader = NULL;
1559
1560 /* TODO: figure out max mapped size */
1561 dev_set_drvdata(dev, gpu);
1562
1563 /*
1564 * We treat the device as initially suspended. The runtime PM
1565 * autosuspend delay is rather arbitary: no measurements have
1566 * yet been performed to determine an appropriate value.
1567 */
1568 pm_runtime_use_autosuspend(gpu->dev);
1569 pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1570 pm_runtime_enable(gpu->dev);
1571
1572 err = component_add(&pdev->dev, &gpu_ops);
1573 if (err < 0) {
1574 dev_err(&pdev->dev, "failed to register component: %d\n", err);
1575 goto fail;
1576 }
1577
1578 return 0;
1579
1580fail:
1581 return err;
1582}
1583
/* Platform remove: unregister the component and disable runtime PM. */
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
1590
1591#ifdef CONFIG_PM
/*
 * Runtime PM suspend callback: refuse (-EBUSY) while fences are
 * outstanding or any unit other than the FE is busy (the FE is expected
 * to sit in its WAIT/LINK loop), otherwise quiesce and gate the clocks.
 */
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}
1609
/*
 * Runtime PM resume callback: re-enable clocks and, if the GPU was fully
 * initialised before suspending (bound to DRM and ring buffer created),
 * restore the hardware state; otherwise etnaviv_gpu_init() will do it.
 */
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
1630#endif
1631
/* Runtime PM ops only; no system sleep callbacks are provided. */
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};
1636
/* Platform driver for individual Vivante GPU cores (one per DT node). */
struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
new file mode 100644
index 000000000000..c75d50359ab0
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -0,0 +1,209 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ETNAVIV_GPU_H__
18#define __ETNAVIV_GPU_H__
19
20#include <linux/clk.h>
21#include <linux/regulator/consumer.h>
22
23#include "etnaviv_drv.h"
24
25struct etnaviv_gem_submit;
26
/* Per-core identification and capability data. */
struct etnaviv_chip_identity {
	/* Chip model. */
	u32 model;

	/* Revision value.*/
	u32 revision;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;

	/* Supported minor feature 1 fields. */
	u32 minor_features1;

	/* Supported minor feature 2 fields. */
	u32 minor_features2;

	/* Supported minor feature 3 fields. */
	u32 minor_features3;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size */
	u32 buffer_size;
};
79
/* One hardware event slot and the fence associated with it. */
struct etnaviv_event {
	bool used;
	struct fence *fence;
};
84
85struct etnaviv_cmdbuf;
86
/* State for a single GPU core. */
struct etnaviv_gpu {
	struct drm_device *drm;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	struct etnaviv_file_private *lastctx;
	bool switch_context;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf *buffer;

	/* bus base address of memory  */
	u32 memory_base;

	/* event management: */
	struct etnaviv_event event[30];
	struct completion event_free;
	spinlock_t event_spinlock;

	/* list of currently in-flight command buffers */
	struct list_head active_cmd_list;

	/* mask of HI idle-state bits relevant for this core */
	u32 idle_mask;

	/* Fencing support */
	u32 next_fence;
	u32 active_fence;
	u32 completed_fence;
	u32 retired_fence;
	wait_queue_head_t fence_event;
	unsigned int fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling active-list retiring: */
	struct work_struct retire_work;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu *mmu;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_core;
	struct clk *clk_shader;

	/* Hang detection: */
#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	u32 hangcheck_fence;
	u32 hangcheck_dma_addr;
	struct work_struct recover_work;
};
141
142struct etnaviv_cmdbuf {
143 /* device this cmdbuf is allocated for */
144 struct etnaviv_gpu *gpu;
145 /* user context key, must be unique between all active users */
146 struct etnaviv_file_private *ctx;
147 /* cmdbuf properties */
148 void *vaddr;
149 dma_addr_t paddr;
150 u32 size;
151 u32 user_size;
152 /* fence after which this buffer is to be disposed */
153 struct fence *fence;
154 /* target exec state */
155 u32 exec_state;
156 /* per GPU in-flight list */
157 struct list_head node;
158 /* BOs attached to this command buffer */
159 unsigned int nr_bos;
160 struct etnaviv_gem_object *bo[0];
161};
162
/* Write a 32-bit value to the GPU register at byte offset @reg. */
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	etnaviv_writel(data, gpu->mmio + reg);
}
167
/* Read the 32-bit GPU register at byte offset @reg. */
static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return etnaviv_readl(gpu->mmio + reg);
}
172
/* True once the GPU has completed @fence (fence_after_eq comparison). */
static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
{
	return fence_after_eq(gpu->completed_fence, fence);
}
177
/* True once the retire worker has processed @fence (fence_after_eq). */
static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
{
	return fence_after_eq(gpu->retired_fence, fence);
}
182
183int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
184
185int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
186
187#ifdef CONFIG_DEBUG_FS
188int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
189#endif
190
191int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
192 unsigned int context, bool exclusive);
193
194void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
195int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
196 u32 fence, struct timespec *timeout);
197int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
198 struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
199int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
200 struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
201struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
202 u32 size, size_t nr_bos);
203void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
204int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
205void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
206
207extern struct platform_driver etnaviv_gpu_driver;
208
209#endif /* __ETNAVIV_GPU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
new file mode 100644
index 000000000000..522cfd447892
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -0,0 +1,240 @@
1/*
2 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/iommu.h>
18#include <linux/platform_device.h>
19#include <linux/sizes.h>
20#include <linux/slab.h>
21#include <linux/dma-mapping.h>
22#include <linux/bitops.h>
23
24#include "etnaviv_gpu.h"
25#include "etnaviv_mmu.h"
26#include "etnaviv_iommu.h"
27#include "state_hi.xml.h"
28
29#define PT_SIZE SZ_2M
30#define PT_ENTRIES (PT_SIZE / sizeof(u32))
31
32#define GPU_MEM_START 0x80000000
33
/* A flat page table: CPU virtual address plus its DMA (bus) address. */
struct etnaviv_iommu_domain_pgtable {
	u32 *pgtable;
	dma_addr_t paddr;
};
38
/* etnaviv v1 MMU domain, embedding the generic iommu_domain. */
struct etnaviv_iommu_domain {
	struct iommu_domain domain;
	struct device *dev;
	/* poison page that all unmapped entries point at */
	void *bad_page_cpu;
	dma_addr_t bad_page_dma;
	struct etnaviv_iommu_domain_pgtable pgtable;
	/* serialises page table updates */
	spinlock_t map_lock;
};
47
/* Upcast from the embedded generic iommu_domain to the etnaviv domain. */
static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommu_domain, domain);
}
52
/*
 * Allocate a DMA-coherent page table of @size bytes.
 *
 * NOTE(review): dma_alloc_coherent() is called with a NULL device, so the
 * allocation ignores the GPU device's DMA constraints — consider passing
 * the domain's struct device here; verify against the DMA API rules.
 */
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
			 size_t size)
{
	pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
	if (!pgtable->pgtable)
		return -ENOMEM;

	return 0;
}
62
/*
 * Free a page table allocated by pgtable_alloc().
 * NOTE(review): mirrors the NULL-device dma_alloc_coherent() call above.
 */
static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
			 size_t size)
{
	dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
}
68
69static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
70 unsigned long iova)
71{
72 /* calcuate index into page table */
73 unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
74 phys_addr_t paddr;
75
76 paddr = pgtable->pgtable[index];
77
78 return paddr;
79}
80
81static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
82 unsigned long iova, phys_addr_t paddr)
83{
84 /* calcuate index into page table */
85 unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
86
87 pgtable->pgtable[index] = paddr;
88}
89
90static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
91{
92 u32 *p;
93 int ret, i;
94
95 etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
96 SZ_4K,
97 &etnaviv_domain->bad_page_dma,
98 GFP_KERNEL);
99 if (!etnaviv_domain->bad_page_cpu)
100 return -ENOMEM;
101
102 p = etnaviv_domain->bad_page_cpu;
103 for (i = 0; i < SZ_4K / 4; i++)
104 *p++ = 0xdead55aa;
105
106 ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
107 if (ret < 0) {
108 dma_free_coherent(etnaviv_domain->dev, SZ_4K,
109 etnaviv_domain->bad_page_cpu,
110 etnaviv_domain->bad_page_dma);
111 return ret;
112 }
113
114 for (i = 0; i < PT_ENTRIES; i++)
115 etnaviv_domain->pgtable.pgtable[i] =
116 etnaviv_domain->bad_page_dma;
117
118 spin_lock_init(&etnaviv_domain->map_lock);
119
120 return 0;
121}
122
123static void etnaviv_domain_free(struct iommu_domain *domain)
124{
125 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
126
127 pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
128
129 dma_free_coherent(etnaviv_domain->dev, SZ_4K,
130 etnaviv_domain->bad_page_cpu,
131 etnaviv_domain->bad_page_dma);
132
133 kfree(etnaviv_domain);
134}
135
136static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
137 phys_addr_t paddr, size_t size, int prot)
138{
139 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
140
141 if (size != SZ_4K)
142 return -EINVAL;
143
144 spin_lock(&etnaviv_domain->map_lock);
145 pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
146 spin_unlock(&etnaviv_domain->map_lock);
147
148 return 0;
149}
150
151static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
152 unsigned long iova, size_t size)
153{
154 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
155
156 if (size != SZ_4K)
157 return -EINVAL;
158
159 spin_lock(&etnaviv_domain->map_lock);
160 pgtable_write(&etnaviv_domain->pgtable, iova,
161 etnaviv_domain->bad_page_dma);
162 spin_unlock(&etnaviv_domain->map_lock);
163
164 return SZ_4K;
165}
166
/* iommu_ops.iova_to_phys: return the page table entry covering @iova. */
static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
	dma_addr_t iova)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	return pgtable_read(&etnaviv_domain->pgtable, iova);
}
174
/* Size of the buffer needed by etnaviv_iommuv1_dump(): the whole table. */
static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
{
	return PT_SIZE;
}
179
/* Copy the raw page table into @buf (for devcoredump-style debugging). */
static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
}
186
/* Generic iommu_ops plus the etnaviv-specific dump hooks for the v1 MMU. */
static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
	.ops = {
		.domain_free = etnaviv_domain_free,
		.map = etnaviv_iommuv1_map,
		.unmap = etnaviv_iommuv1_unmap,
		.iova_to_phys = etnaviv_iommu_iova_to_phys,
		.pgsize_bitmap = SZ_4K,
	},
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
198
199void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
200 struct iommu_domain *domain)
201{
202 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
203 u32 pgtable;
204
205 /* set page table address in MC */
206 pgtable = (u32)etnaviv_domain->pgtable.paddr;
207
208 gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
209 gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
210 gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
211 gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
212 gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
213}
214
215struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
216{
217 struct etnaviv_iommu_domain *etnaviv_domain;
218 int ret;
219
220 etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
221 if (!etnaviv_domain)
222 return NULL;
223
224 etnaviv_domain->dev = gpu->dev;
225
226 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
227 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
228 etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
229 etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
230
231 ret = __etnaviv_iommu_init(etnaviv_domain);
232 if (ret)
233 goto out_free;
234
235 return &etnaviv_domain->domain;
236
237out_free:
238 kfree(etnaviv_domain);
239 return NULL;
240}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
new file mode 100644
index 000000000000..cf45503f6b6f
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
#ifndef __ETNAVIV_IOMMU_H__
#define __ETNAVIV_IOMMU_H__

#include <linux/iommu.h>
struct etnaviv_gpu;

/* Allocate a v1 MMU domain for @gpu; returns NULL on failure. */
struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
/* Re-write the domain's page table address into the MC client registers. */
void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain);
/* v2 MMU domain allocation — currently a stub returning NULL. */
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);

#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
new file mode 100644
index 000000000000..fbb4aed3dc80
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/iommu.h>
18#include <linux/platform_device.h>
19#include <linux/sizes.h>
20#include <linux/slab.h>
21#include <linux/dma-mapping.h>
22#include <linux/bitops.h>
23
24#include "etnaviv_gpu.h"
25#include "etnaviv_iommu.h"
26#include "state_hi.xml.h"
27
28
/* v2 MMU support is not implemented yet; callers must handle NULL. */
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
{
	/* TODO */
	return NULL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
new file mode 100644
index 000000000000..603ea41c5389
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ETNAVIV_IOMMU_V2_H__
18#define __ETNAVIV_IOMMU_V2_H__
19
20#include <linux/iommu.h>
21struct etnaviv_gpu;
22
23struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
24
25#endif /* __ETNAVIV_IOMMU_V2_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
new file mode 100644
index 000000000000..6743bc648dc8
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -0,0 +1,299 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include "etnaviv_drv.h"
18#include "etnaviv_gem.h"
19#include "etnaviv_gpu.h"
20#include "etnaviv_mmu.h"
21
/* Report-only IOMMU fault callback: log the faulting address and flags. */
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
	unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}
28
/*
 * Map a scatterlist into the GPU address range starting at @iova.
 *
 * Each entry is mapped with its offset folded into the start address and
 * length, so mappings stay page-granular.  On failure, every entry mapped
 * so far is unmapped again before returning the error.
 *
 * NOTE(review): pa is held in a u32, truncating sg_dma_address() on
 * systems with >32-bit DMA addresses — confirm the platforms this runs on.
 */
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* Unwind: unmap the i entries that were successfully mapped. */
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}
67
68int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
69 struct sg_table *sgt, unsigned len)
70{
71 struct iommu_domain *domain = iommu->domain;
72 struct scatterlist *sg;
73 unsigned int da = iova;
74 int i;
75
76 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
77 size_t bytes = sg_dma_len(sg) + sg->offset;
78 size_t unmapped;
79
80 unmapped = iommu_unmap(domain, da, bytes);
81 if (unmapped < bytes)
82 return unmapped;
83
84 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
85
86 BUG_ON(!PAGE_ALIGNED(bytes));
87
88 da += bytes;
89 }
90
91 return 0;
92}
93
/*
 * Unmap @mapping's object from the MMU and give its vram node back to
 * the allocator.  Caller must hold mmu->lock.
 */
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
103
/*
 * Establish a GPU mapping for @etnaviv_obj, filling in @mapping.
 *
 * Contiguous objects on a v1 MMU can be addressed directly (minus the
 * bus base address) without a page table entry.  Otherwise a range is
 * allocated from the drm_mm address space manager; if the space is
 * exhausted, unused old mappings are evicted via the drm_mm scan API
 * until the allocation fits.
 *
 * Returns 0 on success or a negative errno.  Caller must hold the
 * object's lock (asserted below); mmu->lock is taken internally.
 */
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_vram_mapping *free = NULL;
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		/* Direct addressing only works below the 2GiB boundary. */
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;
	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		/* First search above last_iova to keep allocations moving. */
		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
			DRM_MM_SEARCH_DEFAULT);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the get_iova finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed.  Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		/* Mapping failed: return the node and drop the lock. */
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}
236
/*
 * Drop @mapping from @mmu: unmap the pages if they were allocated from
 * this MMU's address space manager, and take the mapping off the list.
 * The mapping must no longer be in use (use count zero).
 */
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}
251
/* Tear down an etnaviv_iommu: address manager, backing domain, wrapper. */
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}
258
259struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
260 struct iommu_domain *domain, enum etnaviv_iommu_version version)
261{
262 struct etnaviv_iommu *mmu;
263
264 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
265 if (!mmu)
266 return ERR_PTR(-ENOMEM);
267
268 mmu->domain = domain;
269 mmu->gpu = gpu;
270 mmu->version = version;
271 mutex_init(&mmu->lock);
272 INIT_LIST_HEAD(&mmu->mappings);
273
274 drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
275 domain->geometry.aperture_end -
276 domain->geometry.aperture_start + 1);
277
278 iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
279
280 return mmu;
281}
282
/* Ask the domain implementation how large a dump buffer must be. */
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}
291
/* Dump the domain's page tables into @buf (sized per _dump_size above). */
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
new file mode 100644
index 000000000000..fff215a47630
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ETNAVIV_MMU_H__
18#define __ETNAVIV_MMU_H__
19
20#include <linux/iommu.h>
21
22enum etnaviv_iommu_version {
23 ETNAVIV_IOMMU_V1 = 0,
24 ETNAVIV_IOMMU_V2,
25};
26
27struct etnaviv_gpu;
28struct etnaviv_vram_mapping;
29
/* Generic iommu_ops extended with etnaviv's page table dump hooks. */
struct etnaviv_iommu_ops {
	struct iommu_ops ops;
	size_t (*dump_size)(struct iommu_domain *);
	void (*dump)(struct iommu_domain *, void *);
};
35
/* Per-GPU MMU context: the backing iommu_domain plus address management. */
struct etnaviv_iommu {
	struct etnaviv_gpu *gpu;
	struct iommu_domain *domain;

	enum etnaviv_iommu_version version;

	/* memory manager for GPU address area */
	struct mutex lock;
	struct list_head mappings;
	struct drm_mm mm;
	/* hint: start next search above the last allocation */
	u32 last_iova;
	/* an MMU flush is required before the next submit */
	bool need_flush;
};
49
50struct etnaviv_gem_object;
51
52int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
53 int cnt);
54int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
55 struct sg_table *sgt, unsigned len, int prot);
56int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
57 struct sg_table *sgt, unsigned len);
58int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
59 struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
60 struct etnaviv_vram_mapping *mapping);
61void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
62 struct etnaviv_vram_mapping *mapping);
63void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
64
65size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
66void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
67
68struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
69 struct iommu_domain *domain, enum etnaviv_iommu_version version);
70
71#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
new file mode 100644
index 000000000000..368218304566
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -0,0 +1,351 @@
#ifndef STATE_XML
#define STATE_XML

/* Autogenerated file, DO NOT EDIT manually!

This file was generated by the rules-ng-ng headergen tool in this git repository:
http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng

The rules-ng-ng source files this header was generated from are:
- state.xml    ( 18882 bytes, from 2015-03-25 11:42:32)
- common.xml   ( 18437 bytes, from 2015-03-25 11:27:41)
- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
- state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06)
- state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19)
- state_vg.xml (  5973 bytes, from 2015-03-25 11:26:01)

Copyright (C) 2015
*/

/* NOTE(review): autogenerated Vivante register database — do not hand-edit;
 * regenerate from the rules-ng-ng XML sources listed above instead. */

#define VARYING_COMPONENT_USE_UNUSED				0x00000000
#define VARYING_COMPONENT_USE_USED				0x00000001
#define VARYING_COMPONENT_USE_POINTCOORD_X			0x00000002
#define VARYING_COMPONENT_USE_POINTCOORD_Y			0x00000003
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK		0x000000ff
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT		0
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x)		(((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)

/* FE: command fetch / vertex fetch front end register block */
#define VIVS_FE							0x00000000

#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0)			(0x00000600 + 0x4*(i0))
#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE			0x00000004
#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN			0x00000010
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK		0x0000000f
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT		0
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE			0x00000000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE	0x00000001
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT		0x00000002
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT	0x00000003
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT			0x00000004
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT		0x00000005
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT		0x00000008
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT		0x00000009
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED		0x0000000b
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2	0x0000000c
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK		0x00000030
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT		4
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE		0x00000080
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK		0x00000700
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT		8
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK)
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK			0x00003000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT		12
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK)
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK		0x0000c000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT		14
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF		0x00000000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON		0x00008000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK		0x00ff0000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT		16
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK)
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK			0xff000000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT		24
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK)

#define VIVS_FE_CMD_STREAM_BASE_ADDR				0x00000640

#define VIVS_FE_INDEX_STREAM_BASE_ADDR				0x00000644

#define VIVS_FE_INDEX_STREAM_CONTROL				0x00000648
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK			0x00000003
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT		0
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR		0x00000000
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT	0x00000001
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT		0x00000002

#define VIVS_FE_VERTEX_STREAM_BASE_ADDR				0x0000064c

#define VIVS_FE_VERTEX_STREAM_CONTROL				0x00000650

#define VIVS_FE_COMMAND_ADDRESS					0x00000654

#define VIVS_FE_COMMAND_CONTROL					0x00000658
#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK			0x0000ffff
#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT			0
#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x)			(((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK)
#define VIVS_FE_COMMAND_CONTROL_ENABLE				0x00010000

#define VIVS_FE_DMA_STATUS					0x0000065c

#define VIVS_FE_DMA_DEBUG_STATE					0x00000660
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK			0x0000001f
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT		0
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE			0x00000000
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC			0x00000001
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0			0x00000002
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0			0x00000003
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1			0x00000004
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1			0x00000005
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR			0x00000006
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD			0x00000007
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL		0x00000008
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL		0x00000009
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA		0x0000000a
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX		0x0000000b
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW			0x0000000c
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0		0x0000000d
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1		0x0000000e
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0		0x0000000f
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1		0x00000010
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO		0x00000011
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT			0x00000012
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK			0x00000013
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END			0x00000014
#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL			0x00000015
#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK		0x00000300
#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT		8
#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE		0x00000000
#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START		0x00000100
#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ		0x00000200
#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END		0x00000300
#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK		0x00000c00
#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT		10
#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE		0x00000000
#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID	0x00000400
#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID		0x00000800
#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK		0x00003000
#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT		12
#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE		0x00000000
#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX		0x00001000
#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL		0x00002000
#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK			0x0000c000
#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT		14
#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE			0x00000000
#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR			0x00004000
#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC		0x00008000
#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK		0x00030000
#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT		16
#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE		0x00000000
#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE		0x00010000
#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS		0x00020000

#define VIVS_FE_DMA_ADDRESS					0x00000664

#define VIVS_FE_DMA_LOW						0x00000668

#define VIVS_FE_DMA_HIGH					0x0000066c

#define VIVS_FE_AUTO_FLUSH					0x00000670

#define VIVS_FE_UNK00678					0x00000678

#define VIVS_FE_UNK0067C					0x0000067c

#define VIVS_FE_VERTEX_STREAMS(i0)				(0x00000000 + 0x4*(i0))
#define VIVS_FE_VERTEX_STREAMS__ESIZE				0x00000004
#define VIVS_FE_VERTEX_STREAMS__LEN				0x00000008

#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0)			(0x00000680 + 0x4*(i0))

#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0)			(0x000006a0 + 0x4*(i0))

#define VIVS_FE_UNK00700(i0)					(0x00000700 + 0x4*(i0))
#define VIVS_FE_UNK00700__ESIZE					0x00000004
#define VIVS_FE_UNK00700__LEN					0x00000010

#define VIVS_FE_UNK00740(i0)					(0x00000740 + 0x4*(i0))
#define VIVS_FE_UNK00740__ESIZE					0x00000004
#define VIVS_FE_UNK00740__LEN					0x00000010

#define VIVS_FE_UNK00780(i0)					(0x00000780 + 0x4*(i0))
#define VIVS_FE_UNK00780__ESIZE					0x00000004
#define VIVS_FE_UNK00780__LEN					0x00000010

/* GL: global state register block (pipe select, events, semaphores, caches) */
#define VIVS_GL							0x00000000

#define VIVS_GL_PIPE_SELECT					0x00003800
#define VIVS_GL_PIPE_SELECT_PIPE__MASK				0x00000001
#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT				0
#define VIVS_GL_PIPE_SELECT_PIPE(x)				(((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK)

#define VIVS_GL_EVENT						0x00003804
#define VIVS_GL_EVENT_EVENT_ID__MASK				0x0000001f
#define VIVS_GL_EVENT_EVENT_ID__SHIFT				0
#define VIVS_GL_EVENT_EVENT_ID(x)				(((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
#define VIVS_GL_EVENT_FROM_FE					0x00000020
#define VIVS_GL_EVENT_FROM_PE					0x00000040
#define VIVS_GL_EVENT_SOURCE__MASK				0x00001f00
#define VIVS_GL_EVENT_SOURCE__SHIFT				8
#define VIVS_GL_EVENT_SOURCE(x)					(((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)

#define VIVS_GL_SEMAPHORE_TOKEN					0x00003808
#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK			0x0000001f
#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT			0
#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x)				(((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK)
#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK			0x00001f00
#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT			8
#define VIVS_GL_SEMAPHORE_TOKEN_TO(x)				(((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)

#define VIVS_GL_FLUSH_CACHE					0x0000380c
#define VIVS_GL_FLUSH_CACHE_DEPTH				0x00000001
#define VIVS_GL_FLUSH_CACHE_COLOR				0x00000002
#define VIVS_GL_FLUSH_CACHE_TEXTURE				0x00000004
#define VIVS_GL_FLUSH_CACHE_PE2D				0x00000008
#define VIVS_GL_FLUSH_CACHE_TEXTUREVS				0x00000010
#define VIVS_GL_FLUSH_CACHE_SHADER_L1				0x00000020
#define VIVS_GL_FLUSH_CACHE_SHADER_L2				0x00000040

#define VIVS_GL_FLUSH_MMU					0x00003810
#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU				0x00000001
#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1				0x00000002
#define VIVS_GL_FLUSH_MMU_FLUSH_UNK2				0x00000004
#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU				0x00000008
#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4				0x00000010

#define VIVS_GL_VERTEX_ELEMENT_CONFIG				0x00003814

#define VIVS_GL_MULTI_SAMPLE_CONFIG				0x00003818
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK		0x00000003
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT		0
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE		0x00000000
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X		0x00000001
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X		0x00000002
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK		0x00000008
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK		0x000000f0
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT		4
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x)		(((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK)
#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK		0x00000100
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK			0x00007000
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT		12
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x)			(((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK)
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK			0x00008000
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK			0x00030000
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT		16
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x)			(((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK)
#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK			0x00080000

#define VIVS_GL_VARYING_TOTAL_COMPONENTS			0x0000381c
#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK		0x000000ff
#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT		0
#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x)			(((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)

#define VIVS_GL_VARYING_NUM_COMPONENTS				0x00003820
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK		0x00000007
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT		0
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK		0x00000070
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT		4
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK		0x00000700
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT		8
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK		0x00007000
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT		12
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK		0x00070000
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT		16
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK		0x00700000
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT		20
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK		0x07000000
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT		24
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK		0x70000000
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT		28
#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)

#define VIVS_GL_VARYING_COMPONENT_USE(i0)			(0x00003828 + 0x4*(i0))
#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE			0x00000004
#define VIVS_GL_VARYING_COMPONENT_USE__LEN			0x00000002
#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK		0x00000003
#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT		0
#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK		0x0000000c
#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT		2
#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK		0x00000030
#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT		4
#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK		0x000000c0
#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT		6
#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK		0x00000300
#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT		8
#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK		0x00000c00
#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT		10
#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK		0x00003000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT		12
#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK		0x0000c000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT		14
#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK		0x00030000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT		16
#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK		0x000c0000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT		18
#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK		0x00300000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT		20
#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK		0x00c00000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT		22
#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK		0x03000000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT		24
#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK		0x0c000000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT		26
#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK		0x30000000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT		28
#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK)
#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK		0xc0000000
#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT		30
#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)

#define VIVS_GL_UNK03834					0x00003834

#define VIVS_GL_UNK03838					0x00003838

#define VIVS_GL_API_MODE					0x0000384c
#define VIVS_GL_API_MODE_OPENGL					0x00000000
#define VIVS_GL_API_MODE_OPENVG					0x00000001
#define VIVS_GL_API_MODE_OPENCL					0x00000002

#define VIVS_GL_CONTEXT_POINTER					0x00003850

#define VIVS_GL_UNK03A00					0x00003a00

#define VIVS_GL_STALL_TOKEN					0x00003c00
#define VIVS_GL_STALL_TOKEN_FROM__MASK				0x0000001f
#define VIVS_GL_STALL_TOKEN_FROM__SHIFT				0
#define VIVS_GL_STALL_TOKEN_FROM(x)				(((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK)
#define VIVS_GL_STALL_TOKEN_TO__MASK				0x00001f00
#define VIVS_GL_STALL_TOKEN_TO__SHIFT				8
#define VIVS_GL_STALL_TOKEN_TO(x)				(((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK)
#define VIVS_GL_STALL_TOKEN_FLIP0				0x40000000
#define VIVS_GL_STALL_TOKEN_FLIP1				0x80000000

/* DUMMY: placeholder register block */
#define VIVS_DUMMY						0x00000000

#define VIVS_DUMMY_DUMMY					0x0003fffc


#endif /* STATE_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
new file mode 100644
index 000000000000..0064f2640396
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -0,0 +1,407 @@
1#ifndef STATE_HI_XML
2#define STATE_HI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
12- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
13
14Copyright (C) 2015
15*/
16
17
18#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001
19#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002
20#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003
21#define VIVS_HI 0x00000000
22
23#define VIVS_HI_CLOCK_CONTROL 0x00000000
24#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS 0x00000001
25#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS 0x00000002
26#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK 0x000001fc
27#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT 2
28#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK)
29#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD 0x00000200
30#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING 0x00000400
31#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS 0x00000800
32#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET 0x00001000
33#define VIVS_HI_CLOCK_CONTROL_IDLE_3D 0x00010000
34#define VIVS_HI_CLOCK_CONTROL_IDLE_2D 0x00020000
35#define VIVS_HI_CLOCK_CONTROL_IDLE_VG 0x00040000
36#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU 0x00080000
37#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK 0x00f00000
38#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT 20
39#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x) (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK)
40
41#define VIVS_HI_IDLE_STATE 0x00000004
42#define VIVS_HI_IDLE_STATE_FE 0x00000001
43#define VIVS_HI_IDLE_STATE_DE 0x00000002
44#define VIVS_HI_IDLE_STATE_PE 0x00000004
45#define VIVS_HI_IDLE_STATE_SH 0x00000008
46#define VIVS_HI_IDLE_STATE_PA 0x00000010
47#define VIVS_HI_IDLE_STATE_SE 0x00000020
48#define VIVS_HI_IDLE_STATE_RA 0x00000040
49#define VIVS_HI_IDLE_STATE_TX 0x00000080
50#define VIVS_HI_IDLE_STATE_VG 0x00000100
51#define VIVS_HI_IDLE_STATE_IM 0x00000200
52#define VIVS_HI_IDLE_STATE_FP 0x00000400
53#define VIVS_HI_IDLE_STATE_TS 0x00000800
54#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000
55
56#define VIVS_HI_AXI_CONFIG 0x00000008
57#define VIVS_HI_AXI_CONFIG_AWID__MASK 0x0000000f
58#define VIVS_HI_AXI_CONFIG_AWID__SHIFT 0
59#define VIVS_HI_AXI_CONFIG_AWID(x) (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK)
60#define VIVS_HI_AXI_CONFIG_ARID__MASK 0x000000f0
61#define VIVS_HI_AXI_CONFIG_ARID__SHIFT 4
62#define VIVS_HI_AXI_CONFIG_ARID(x) (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK)
63#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK 0x00000f00
64#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT 8
65#define VIVS_HI_AXI_CONFIG_AWCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK)
66#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK 0x0000f000
67#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT 12
68#define VIVS_HI_AXI_CONFIG_ARCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK)
69
70#define VIVS_HI_AXI_STATUS 0x0000000c
71#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK 0x0000000f
72#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT 0
73#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK)
74#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK 0x000000f0
75#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT 4
76#define VIVS_HI_AXI_STATUS_RD_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK)
77#define VIVS_HI_AXI_STATUS_DET_WR_ERR 0x00000100
78#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200
79
80#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010
81#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff
82#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0
83#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
84#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000
85
86#define VIVS_HI_INTR_ENBL 0x00000014
87#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK 0xffffffff
88#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT 0
89#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x) (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK)
90
91#define VIVS_HI_CHIP_IDENTITY 0x00000018
92#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK 0xff000000
93#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT 24
94#define VIVS_HI_CHIP_IDENTITY_FAMILY(x) (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
95#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK 0x00ff0000
96#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT 16
97#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x) (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK)
98#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK 0x0000f000
99#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT 12
100#define VIVS_HI_CHIP_IDENTITY_REVISION(x) (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
101
102#define VIVS_HI_CHIP_FEATURE 0x0000001c
103
104#define VIVS_HI_CHIP_MODEL 0x00000020
105
106#define VIVS_HI_CHIP_REV 0x00000024
107
108#define VIVS_HI_CHIP_DATE 0x00000028
109
110#define VIVS_HI_CHIP_TIME 0x0000002c
111
112#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034
113
114#define VIVS_HI_CACHE_CONTROL 0x00000038
115
116#define VIVS_HI_MEMORY_COUNTER_RESET 0x0000003c
117
118#define VIVS_HI_PROFILE_READ_BYTES8 0x00000040
119
120#define VIVS_HI_PROFILE_WRITE_BYTES8 0x00000044
121
122#define VIVS_HI_CHIP_SPECS 0x00000048
123#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK 0x0000000f
124#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT 0
125#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
126#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK 0x000000f0
127#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT 4
128#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x) (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
129#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK 0x00000f00
130#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT 8
131#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
132#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK 0x0001f000
133#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT 12
134#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
135#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK 0x01f00000
136#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT 20
137#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
138#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK 0x0e000000
139#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT 25
140#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x) (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
141#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK 0xf0000000
142#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT 28
143#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
144
145#define VIVS_HI_PROFILE_WRITE_BURSTS 0x0000004c
146
147#define VIVS_HI_PROFILE_WRITE_REQUESTS 0x00000050
148
149#define VIVS_HI_PROFILE_READ_BURSTS 0x00000058
150
151#define VIVS_HI_PROFILE_READ_REQUESTS 0x0000005c
152
153#define VIVS_HI_PROFILE_READ_LASTS 0x00000060
154
155#define VIVS_HI_GP_OUT0 0x00000064
156
157#define VIVS_HI_GP_OUT1 0x00000068
158
159#define VIVS_HI_GP_OUT2 0x0000006c
160
161#define VIVS_HI_AXI_CONTROL 0x00000070
162#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE 0x00000001
163
164#define VIVS_HI_CHIP_MINOR_FEATURE_1 0x00000074
165
166#define VIVS_HI_PROFILE_TOTAL_CYCLES 0x00000078
167
168#define VIVS_HI_PROFILE_IDLE_CYCLES 0x0000007c
169
170#define VIVS_HI_CHIP_SPECS_2 0x00000080
171#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK 0x000000ff
172#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT 0
173#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
174#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK 0x0000ff00
175#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT 8
176#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
177#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK 0xffff0000
178#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT 16
179#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x) (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
180
181#define VIVS_HI_CHIP_MINOR_FEATURE_2 0x00000084
182
183#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088
184
185#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
186
187#define VIVS_PM 0x00000000
188
189#define VIVS_PM_POWER_CONTROLS 0x00000100
190#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING 0x00000001
191#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING 0x00000002
192#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING 0x00000004
193#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK 0x000000f0
194#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT 4
195#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK)
196#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK 0xffff0000
197#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT 16
198#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK)
199
200#define VIVS_PM_MODULE_CONTROLS 0x00000104
201#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
202#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
203#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
204
205#define VIVS_PM_MODULE_STATUS 0x00000108
206#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
207#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002
208#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004
209
210#define VIVS_PM_PULSE_EATER 0x0000010c
211
212#define VIVS_MMUv2 0x00000000
213
214#define VIVS_MMUv2_SAFE_ADDRESS 0x00000180
215
216#define VIVS_MMUv2_CONFIGURATION 0x00000184
217#define VIVS_MMUv2_CONFIGURATION_MODE__MASK 0x00000001
218#define VIVS_MMUv2_CONFIGURATION_MODE__SHIFT 0
219#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K 0x00000000
220#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K 0x00000001
221#define VIVS_MMUv2_CONFIGURATION_MODE_MASK 0x00000008
222#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK 0x00000010
223#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT 4
224#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH 0x00000010
225#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK 0x00000080
226#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK 0x00000100
227#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK 0xfffffc00
228#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT 10
229#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
230
231#define VIVS_MMUv2_STATUS 0x00000188
232#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003
233#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0
234#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
235#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030
236#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4
237#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
238#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300
239#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8
240#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
241#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000
242#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12
243#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
244
245#define VIVS_MMUv2_CONTROL 0x0000018c
246#define VIVS_MMUv2_CONTROL_ENABLE 0x00000001
247
248#define VIVS_MMUv2_EXCEPTION_ADDR(i0) (0x00000190 + 0x4*(i0))
249#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004
250#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004
251
252#define VIVS_MC 0x00000000
253
254#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400
255
256#define VIVS_MC_MMU_TX_PAGE_TABLE 0x00000404
257
258#define VIVS_MC_MMU_PE_PAGE_TABLE 0x00000408
259
260#define VIVS_MC_MMU_PEZ_PAGE_TABLE 0x0000040c
261
262#define VIVS_MC_MMU_RA_PAGE_TABLE 0x00000410
263
264#define VIVS_MC_DEBUG_MEMORY 0x00000414
265#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320 0x00000008
266#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS 0x00100000
267#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS 0x00200000
268
269#define VIVS_MC_MEMORY_BASE_ADDR_RA 0x00000418
270
271#define VIVS_MC_MEMORY_BASE_ADDR_FE 0x0000041c
272
273#define VIVS_MC_MEMORY_BASE_ADDR_TX 0x00000420
274
275#define VIVS_MC_MEMORY_BASE_ADDR_PEZ 0x00000424
276
277#define VIVS_MC_MEMORY_BASE_ADDR_PE 0x00000428
278
279#define VIVS_MC_MEMORY_TIMING_CONTROL 0x0000042c
280
281#define VIVS_MC_MEMORY_FLUSH 0x00000430
282
283#define VIVS_MC_PROFILE_CYCLE_COUNTER 0x00000438
284
285#define VIVS_MC_DEBUG_READ0 0x0000043c
286
287#define VIVS_MC_DEBUG_READ1 0x00000440
288
289#define VIVS_MC_DEBUG_WRITE 0x00000444
290
291#define VIVS_MC_PROFILE_RA_READ 0x00000448
292
293#define VIVS_MC_PROFILE_TX_READ 0x0000044c
294
295#define VIVS_MC_PROFILE_FE_READ 0x00000450
296
297#define VIVS_MC_PROFILE_PE_READ 0x00000454
298
299#define VIVS_MC_PROFILE_DE_READ 0x00000458
300
301#define VIVS_MC_PROFILE_SH_READ 0x0000045c
302
303#define VIVS_MC_PROFILE_PA_READ 0x00000460
304
305#define VIVS_MC_PROFILE_SE_READ 0x00000464
306
307#define VIVS_MC_PROFILE_MC_READ 0x00000468
308
309#define VIVS_MC_PROFILE_HI_READ 0x0000046c
310
311#define VIVS_MC_PROFILE_CONFIG0 0x00000470
312#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f
313#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
314#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
315#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00
316#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
317#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
318#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000
319#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16
320#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000
321#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000
322#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE 0x00020000
323#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000
324#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000
325#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000
326#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000
327#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24
328#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000
329#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000
330#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER 0x08000000
331#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER 0x09000000
332#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER 0x0a000000
333#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER 0x0b000000
334#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER 0x0c000000
335#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER 0x0d000000
336#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER 0x0e000000
337#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000
338
339#define VIVS_MC_PROFILE_CONFIG1 0x00000474
340#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f
341#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0
342#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003
343#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004
344#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER 0x00000005
345#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006
346#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
347#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
348#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
349#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00
350#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
351#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
352#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
353#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
354#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000
355#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
356#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000
357#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000
358#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z 0x00020000
359#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT 0x00030000
360#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER 0x00090000
361#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
362#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
363#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
364#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000
365#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
366#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
367#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000
368#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS 0x02000000
369#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS 0x03000000
370#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN 0x04000000
371#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT 0x05000000
372#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT 0x06000000
373#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT 0x07000000
374#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT 0x08000000
375#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT 0x09000000
376#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000
377
378#define VIVS_MC_PROFILE_CONFIG2 0x00000478
379#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f
380#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0
381#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
382#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
383#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
384#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
385#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00
386#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
387#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
388#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
389#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
390#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
391
392#define VIVS_MC_PROFILE_CONFIG3 0x0000047c
393
394#define VIVS_MC_BUS_CONFIG 0x00000480
395#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK 0x0000000f
396#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT 0
397#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK)
398#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK 0x000000f0
399#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT 4
400#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK)
401
402#define VIVS_MC_START_COMPOSITION 0x00000554
403
404#define VIVS_MC_128B_MERGE 0x00000558
405
406
407#endif /* STATE_HI_XML */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 96e86cf4455b..83efca941388 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -118,7 +118,7 @@ config DRM_EXYNOS_ROTATOR
118 118
119config DRM_EXYNOS_GSC 119config DRM_EXYNOS_GSC
120 bool "GScaler" 120 bool "GScaler"
121 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM 121 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !VIDEO_SAMSUNG_EXYNOS_GSC
122 help 122 help
123 Choose this option if you want to use Exynos GSC for DRM. 123 Choose this option if you want to use Exynos GSC for DRM.
124 124
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index fbe1b3174f75..1bf6a21130c7 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -21,11 +21,11 @@
21 21
22#include "exynos_drm_drv.h" 22#include "exynos_drm_drv.h"
23#include "exynos_drm_crtc.h" 23#include "exynos_drm_crtc.h"
24#include "exynos_drm_fb.h"
24#include "exynos_drm_plane.h" 25#include "exynos_drm_plane.h"
25#include "exynos_drm_iommu.h" 26#include "exynos_drm_iommu.h"
26 27
27#define WINDOWS_NR 3 28#define WINDOWS_NR 3
28#define CURSOR_WIN 2
29#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 29#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
30 30
31static const char * const decon_clks_name[] = { 31static const char * const decon_clks_name[] = {
@@ -56,6 +56,7 @@ struct decon_context {
56 struct drm_device *drm_dev; 56 struct drm_device *drm_dev;
57 struct exynos_drm_crtc *crtc; 57 struct exynos_drm_crtc *crtc;
58 struct exynos_drm_plane planes[WINDOWS_NR]; 58 struct exynos_drm_plane planes[WINDOWS_NR];
59 struct exynos_drm_plane_config configs[WINDOWS_NR];
59 void __iomem *addr; 60 void __iomem *addr;
60 struct clk *clks[ARRAY_SIZE(decon_clks_name)]; 61 struct clk *clks[ARRAY_SIZE(decon_clks_name)];
61 int pipe; 62 int pipe;
@@ -71,6 +72,12 @@ static const uint32_t decon_formats[] = {
71 DRM_FORMAT_ARGB8888, 72 DRM_FORMAT_ARGB8888,
72}; 73};
73 74
75static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
76 DRM_PLANE_TYPE_PRIMARY,
77 DRM_PLANE_TYPE_OVERLAY,
78 DRM_PLANE_TYPE_CURSOR,
79};
80
74static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask, 81static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
75 u32 val) 82 u32 val)
76{ 83{
@@ -241,15 +248,16 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win,
241 protect ? ~0 : 0); 248 protect ? ~0 : 0);
242} 249}
243 250
244static void decon_atomic_begin(struct exynos_drm_crtc *crtc, 251static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
245 struct exynos_drm_plane *plane)
246{ 252{
247 struct decon_context *ctx = crtc->ctx; 253 struct decon_context *ctx = crtc->ctx;
254 int i;
248 255
249 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 256 if (test_bit(BIT_SUSPENDED, &ctx->flags))
250 return; 257 return;
251 258
252 decon_shadow_protect_win(ctx, plane->zpos, true); 259 for (i = ctx->first_win; i < WINDOWS_NR; i++)
260 decon_shadow_protect_win(ctx, i, true);
253} 261}
254 262
255#define BIT_VAL(x, e, s) (((x) & ((1 << ((e) - (s) + 1)) - 1)) << (s)) 263#define BIT_VAL(x, e, s) (((x) & ((1 << ((e) - (s) + 1)) - 1)) << (s))
@@ -259,21 +267,24 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
259static void decon_update_plane(struct exynos_drm_crtc *crtc, 267static void decon_update_plane(struct exynos_drm_crtc *crtc,
260 struct exynos_drm_plane *plane) 268 struct exynos_drm_plane *plane)
261{ 269{
270 struct exynos_drm_plane_state *state =
271 to_exynos_plane_state(plane->base.state);
262 struct decon_context *ctx = crtc->ctx; 272 struct decon_context *ctx = crtc->ctx;
263 struct drm_plane_state *state = plane->base.state; 273 struct drm_framebuffer *fb = state->base.fb;
264 unsigned int win = plane->zpos; 274 unsigned int win = plane->index;
265 unsigned int bpp = state->fb->bits_per_pixel >> 3; 275 unsigned int bpp = fb->bits_per_pixel >> 3;
266 unsigned int pitch = state->fb->pitches[0]; 276 unsigned int pitch = fb->pitches[0];
277 dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
267 u32 val; 278 u32 val;
268 279
269 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 280 if (test_bit(BIT_SUSPENDED, &ctx->flags))
270 return; 281 return;
271 282
272 val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y); 283 val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
273 writel(val, ctx->addr + DECON_VIDOSDxA(win)); 284 writel(val, ctx->addr + DECON_VIDOSDxA(win));
274 285
275 val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) | 286 val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
276 COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1); 287 COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
277 writel(val, ctx->addr + DECON_VIDOSDxB(win)); 288 writel(val, ctx->addr + DECON_VIDOSDxB(win));
278 289
279 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | 290 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -284,20 +295,20 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
284 VIDOSD_Wx_ALPHA_B_F(0x0); 295 VIDOSD_Wx_ALPHA_B_F(0x0);
285 writel(val, ctx->addr + DECON_VIDOSDxD(win)); 296 writel(val, ctx->addr + DECON_VIDOSDxD(win));
286 297
287 writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win)); 298 writel(dma_addr, ctx->addr + DECON_VIDW0xADD0B0(win));
288 299
289 val = plane->dma_addr[0] + pitch * plane->crtc_h; 300 val = dma_addr + pitch * state->src.h;
290 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win)); 301 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
291 302
292 if (ctx->out_type != IFTYPE_HDMI) 303 if (ctx->out_type != IFTYPE_HDMI)
293 val = BIT_VAL(pitch - plane->crtc_w * bpp, 27, 14) 304 val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
294 | BIT_VAL(plane->crtc_w * bpp, 13, 0); 305 | BIT_VAL(state->crtc.w * bpp, 13, 0);
295 else 306 else
296 val = BIT_VAL(pitch - plane->crtc_w * bpp, 29, 15) 307 val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15)
297 | BIT_VAL(plane->crtc_w * bpp, 14, 0); 308 | BIT_VAL(state->crtc.w * bpp, 14, 0);
298 writel(val, ctx->addr + DECON_VIDW0xADD2(win)); 309 writel(val, ctx->addr + DECON_VIDW0xADD2(win));
299 310
300 decon_win_set_pixfmt(ctx, win, state->fb); 311 decon_win_set_pixfmt(ctx, win, fb);
301 312
302 /* window enable */ 313 /* window enable */
303 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); 314 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
@@ -310,7 +321,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
310 struct exynos_drm_plane *plane) 321 struct exynos_drm_plane *plane)
311{ 322{
312 struct decon_context *ctx = crtc->ctx; 323 struct decon_context *ctx = crtc->ctx;
313 unsigned int win = plane->zpos; 324 unsigned int win = plane->index;
314 325
315 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 326 if (test_bit(BIT_SUSPENDED, &ctx->flags))
316 return; 327 return;
@@ -326,15 +337,16 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
326 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 337 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
327} 338}
328 339
329static void decon_atomic_flush(struct exynos_drm_crtc *crtc, 340static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
330 struct exynos_drm_plane *plane)
331{ 341{
332 struct decon_context *ctx = crtc->ctx; 342 struct decon_context *ctx = crtc->ctx;
343 int i;
333 344
334 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 345 if (test_bit(BIT_SUSPENDED, &ctx->flags))
335 return; 346 return;
336 347
337 decon_shadow_protect_win(ctx, plane->zpos, false); 348 for (i = ctx->first_win; i < WINDOWS_NR; i++)
349 decon_shadow_protect_win(ctx, i, false);
338 350
339 if (ctx->out_type == IFTYPE_I80) 351 if (ctx->out_type == IFTYPE_I80)
340 set_bit(BIT_WIN_UPDATED, &ctx->flags); 352 set_bit(BIT_WIN_UPDATED, &ctx->flags);
@@ -377,20 +389,12 @@ static void decon_swreset(struct decon_context *ctx)
377static void decon_enable(struct exynos_drm_crtc *crtc) 389static void decon_enable(struct exynos_drm_crtc *crtc)
378{ 390{
379 struct decon_context *ctx = crtc->ctx; 391 struct decon_context *ctx = crtc->ctx;
380 int ret;
381 int i;
382 392
383 if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags)) 393 if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags))
384 return; 394 return;
385 395
386 pm_runtime_get_sync(ctx->dev); 396 pm_runtime_get_sync(ctx->dev);
387 397
388 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
389 ret = clk_prepare_enable(ctx->clks[i]);
390 if (ret < 0)
391 goto err;
392 }
393
394 set_bit(BIT_CLKS_ENABLED, &ctx->flags); 398 set_bit(BIT_CLKS_ENABLED, &ctx->flags);
395 399
396 /* if vblank was enabled status, enable it again. */ 400 /* if vblank was enabled status, enable it again. */
@@ -399,11 +403,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
399 403
400 decon_commit(ctx->crtc); 404 decon_commit(ctx->crtc);
401 405
402 return;
403err:
404 while (--i >= 0)
405 clk_disable_unprepare(ctx->clks[i]);
406
407 set_bit(BIT_SUSPENDED, &ctx->flags); 406 set_bit(BIT_SUSPENDED, &ctx->flags);
408} 407}
409 408
@@ -425,9 +424,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
425 424
426 decon_swreset(ctx); 425 decon_swreset(ctx);
427 426
428 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
429 clk_disable_unprepare(ctx->clks[i]);
430
431 clear_bit(BIT_CLKS_ENABLED, &ctx->flags); 427 clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
432 428
433 pm_runtime_put_sync(ctx->dev); 429 pm_runtime_put_sync(ctx->dev);
@@ -478,7 +474,6 @@ err:
478static struct exynos_drm_crtc_ops decon_crtc_ops = { 474static struct exynos_drm_crtc_ops decon_crtc_ops = {
479 .enable = decon_enable, 475 .enable = decon_enable,
480 .disable = decon_disable, 476 .disable = decon_disable,
481 .commit = decon_commit,
482 .enable_vblank = decon_enable_vblank, 477 .enable_vblank = decon_enable_vblank,
483 .disable_vblank = decon_disable_vblank, 478 .disable_vblank = decon_disable_vblank,
484 .atomic_begin = decon_atomic_begin, 479 .atomic_begin = decon_atomic_begin,
@@ -495,7 +490,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
495 struct exynos_drm_private *priv = drm_dev->dev_private; 490 struct exynos_drm_private *priv = drm_dev->dev_private;
496 struct exynos_drm_plane *exynos_plane; 491 struct exynos_drm_plane *exynos_plane;
497 enum exynos_drm_output_type out_type; 492 enum exynos_drm_output_type out_type;
498 enum drm_plane_type type;
499 unsigned int win; 493 unsigned int win;
500 int ret; 494 int ret;
501 495
@@ -505,10 +499,13 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
505 for (win = ctx->first_win; win < WINDOWS_NR; win++) { 499 for (win = ctx->first_win; win < WINDOWS_NR; win++) {
506 int tmp = (win == ctx->first_win) ? 0 : win; 500 int tmp = (win == ctx->first_win) ? 0 : win;
507 501
508 type = exynos_plane_get_type(tmp, CURSOR_WIN); 502 ctx->configs[win].pixel_formats = decon_formats;
509 ret = exynos_plane_init(drm_dev, &ctx->planes[win], 503 ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
510 1 << ctx->pipe, type, decon_formats, 504 ctx->configs[win].zpos = win;
511 ARRAY_SIZE(decon_formats), win); 505 ctx->configs[win].type = decon_win_types[tmp];
506
507 ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
508 1 << ctx->pipe, &ctx->configs[win]);
512 if (ret) 509 if (ret)
513 return ret; 510 return ret;
514 } 511 }
@@ -581,6 +578,44 @@ out:
581 return IRQ_HANDLED; 578 return IRQ_HANDLED;
582} 579}
583 580
581#ifdef CONFIG_PM
582static int exynos5433_decon_suspend(struct device *dev)
583{
584 struct decon_context *ctx = dev_get_drvdata(dev);
585 int i;
586
587 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
588 clk_disable_unprepare(ctx->clks[i]);
589
590 return 0;
591}
592
593static int exynos5433_decon_resume(struct device *dev)
594{
595 struct decon_context *ctx = dev_get_drvdata(dev);
596 int i, ret;
597
598 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
599 ret = clk_prepare_enable(ctx->clks[i]);
600 if (ret < 0)
601 goto err;
602 }
603
604 return 0;
605
606err:
607 while (--i >= 0)
608 clk_disable_unprepare(ctx->clks[i]);
609
610 return ret;
611}
612#endif
613
614static const struct dev_pm_ops exynos5433_decon_pm_ops = {
615 SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
616 NULL)
617};
618
584static const struct of_device_id exynos5433_decon_driver_dt_match[] = { 619static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
585 { 620 {
586 .compatible = "samsung,exynos5433-decon", 621 .compatible = "samsung,exynos5433-decon",
@@ -684,6 +719,7 @@ struct platform_driver exynos5433_decon_driver = {
684 .remove = exynos5433_decon_remove, 719 .remove = exynos5433_decon_remove,
685 .driver = { 720 .driver = {
686 .name = "exynos5433-decon", 721 .name = "exynos5433-decon",
722 .pm = &exynos5433_decon_pm_ops,
687 .of_match_table = exynos5433_decon_driver_dt_match, 723 .of_match_table = exynos5433_decon_driver_dt_match,
688 }, 724 },
689}; 725};
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index ead2b16e237d..52bda3b42fe0 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,6 +30,7 @@
30#include "exynos_drm_crtc.h" 30#include "exynos_drm_crtc.h"
31#include "exynos_drm_plane.h" 31#include "exynos_drm_plane.h"
32#include "exynos_drm_drv.h" 32#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h"
33#include "exynos_drm_fbdev.h" 34#include "exynos_drm_fbdev.h"
34#include "exynos_drm_iommu.h" 35#include "exynos_drm_iommu.h"
35 36
@@ -40,13 +41,13 @@
40#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 41#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
41 42
42#define WINDOWS_NR 2 43#define WINDOWS_NR 2
43#define CURSOR_WIN 1
44 44
45struct decon_context { 45struct decon_context {
46 struct device *dev; 46 struct device *dev;
47 struct drm_device *drm_dev; 47 struct drm_device *drm_dev;
48 struct exynos_drm_crtc *crtc; 48 struct exynos_drm_crtc *crtc;
49 struct exynos_drm_plane planes[WINDOWS_NR]; 49 struct exynos_drm_plane planes[WINDOWS_NR];
50 struct exynos_drm_plane_config configs[WINDOWS_NR];
50 struct clk *pclk; 51 struct clk *pclk;
51 struct clk *aclk; 52 struct clk *aclk;
52 struct clk *eclk; 53 struct clk *eclk;
@@ -81,6 +82,11 @@ static const uint32_t decon_formats[] = {
81 DRM_FORMAT_BGRA8888, 82 DRM_FORMAT_BGRA8888,
82}; 83};
83 84
85static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
86 DRM_PLANE_TYPE_PRIMARY,
87 DRM_PLANE_TYPE_CURSOR,
88};
89
84static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) 90static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
85{ 91{
86 struct decon_context *ctx = crtc->ctx; 92 struct decon_context *ctx = crtc->ctx;
@@ -119,13 +125,8 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
119 } 125 }
120 126
121 /* Wait for vsync, as disable channel takes effect at next vsync */ 127 /* Wait for vsync, as disable channel takes effect at next vsync */
122 if (ch_enabled) { 128 if (ch_enabled)
123 unsigned int state = ctx->suspended;
124
125 ctx->suspended = 0;
126 decon_wait_for_vblank(ctx->crtc); 129 decon_wait_for_vblank(ctx->crtc);
127 ctx->suspended = state;
128 }
129} 130}
130 131
131static int decon_ctx_initialize(struct decon_context *ctx, 132static int decon_ctx_initialize(struct decon_context *ctx,
@@ -384,30 +385,32 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
384 writel(val, ctx->regs + SHADOWCON); 385 writel(val, ctx->regs + SHADOWCON);
385} 386}
386 387
387static void decon_atomic_begin(struct exynos_drm_crtc *crtc, 388static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
388 struct exynos_drm_plane *plane)
389{ 389{
390 struct decon_context *ctx = crtc->ctx; 390 struct decon_context *ctx = crtc->ctx;
391 int i;
391 392
392 if (ctx->suspended) 393 if (ctx->suspended)
393 return; 394 return;
394 395
395 decon_shadow_protect_win(ctx, plane->zpos, true); 396 for (i = 0; i < WINDOWS_NR; i++)
397 decon_shadow_protect_win(ctx, i, true);
396} 398}
397 399
398static void decon_update_plane(struct exynos_drm_crtc *crtc, 400static void decon_update_plane(struct exynos_drm_crtc *crtc,
399 struct exynos_drm_plane *plane) 401 struct exynos_drm_plane *plane)
400{ 402{
403 struct exynos_drm_plane_state *state =
404 to_exynos_plane_state(plane->base.state);
401 struct decon_context *ctx = crtc->ctx; 405 struct decon_context *ctx = crtc->ctx;
402 struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; 406 struct drm_framebuffer *fb = state->base.fb;
403 struct drm_plane_state *state = plane->base.state;
404 int padding; 407 int padding;
405 unsigned long val, alpha; 408 unsigned long val, alpha;
406 unsigned int last_x; 409 unsigned int last_x;
407 unsigned int last_y; 410 unsigned int last_y;
408 unsigned int win = plane->zpos; 411 unsigned int win = plane->index;
409 unsigned int bpp = state->fb->bits_per_pixel >> 3; 412 unsigned int bpp = fb->bits_per_pixel >> 3;
410 unsigned int pitch = state->fb->pitches[0]; 413 unsigned int pitch = fb->pitches[0];
411 414
412 if (ctx->suspended) 415 if (ctx->suspended)
413 return; 416 return;
@@ -423,41 +426,32 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
423 */ 426 */
424 427
425 /* buffer start address */ 428 /* buffer start address */
426 val = (unsigned long)plane->dma_addr[0]; 429 val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
427 writel(val, ctx->regs + VIDW_BUF_START(win)); 430 writel(val, ctx->regs + VIDW_BUF_START(win));
428 431
429 padding = (pitch / bpp) - state->fb->width; 432 padding = (pitch / bpp) - fb->width;
430 433
431 /* buffer size */ 434 /* buffer size */
432 writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win)); 435 writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
433 writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win)); 436 writel(fb->height, ctx->regs + VIDW_WHOLE_Y(win));
434 437
435 /* offset from the start of the buffer to read */ 438 /* offset from the start of the buffer to read */
436 writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win)); 439 writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
437 writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win)); 440 writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));
438 441
439 DRM_DEBUG_KMS("start addr = 0x%lx\n", 442 DRM_DEBUG_KMS("start addr = 0x%lx\n",
440 (unsigned long)val); 443 (unsigned long)val);
441 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 444 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
442 plane->crtc_w, plane->crtc_h); 445 state->crtc.w, state->crtc.h);
443
444 /*
445 * OSD position.
446 * In case the window layout goes of LCD layout, DECON fails.
447 */
448 if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
449 plane->crtc_x = mode->hdisplay - plane->crtc_w;
450 if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
451 plane->crtc_y = mode->vdisplay - plane->crtc_h;
452 446
453 val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | 447 val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
454 VIDOSDxA_TOPLEFT_Y(plane->crtc_y); 448 VIDOSDxA_TOPLEFT_Y(state->crtc.y);
455 writel(val, ctx->regs + VIDOSD_A(win)); 449 writel(val, ctx->regs + VIDOSD_A(win));
456 450
457 last_x = plane->crtc_x + plane->crtc_w; 451 last_x = state->crtc.x + state->crtc.w;
458 if (last_x) 452 if (last_x)
459 last_x--; 453 last_x--;
460 last_y = plane->crtc_y + plane->crtc_h; 454 last_y = state->crtc.y + state->crtc.h;
461 if (last_y) 455 if (last_y)
462 last_y--; 456 last_y--;
463 457
@@ -466,7 +460,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
466 writel(val, ctx->regs + VIDOSD_B(win)); 460 writel(val, ctx->regs + VIDOSD_B(win));
467 461
468 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", 462 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
469 plane->crtc_x, plane->crtc_y, last_x, last_y); 463 state->crtc.x, state->crtc.y, last_x, last_y);
470 464
471 /* OSD alpha */ 465 /* OSD alpha */
472 alpha = VIDOSDxC_ALPHA0_R_F(0x0) | 466 alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
@@ -481,7 +475,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
481 475
482 writel(alpha, ctx->regs + VIDOSD_D(win)); 476 writel(alpha, ctx->regs + VIDOSD_D(win));
483 477
484 decon_win_set_pixfmt(ctx, win, state->fb); 478 decon_win_set_pixfmt(ctx, win, fb);
485 479
486 /* hardware window 0 doesn't support color key. */ 480 /* hardware window 0 doesn't support color key. */
487 if (win != 0) 481 if (win != 0)
@@ -505,7 +499,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
505 struct exynos_drm_plane *plane) 499 struct exynos_drm_plane *plane)
506{ 500{
507 struct decon_context *ctx = crtc->ctx; 501 struct decon_context *ctx = crtc->ctx;
508 unsigned int win = plane->zpos; 502 unsigned int win = plane->index;
509 u32 val; 503 u32 val;
510 504
511 if (ctx->suspended) 505 if (ctx->suspended)
@@ -524,15 +518,16 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
524 writel(val, ctx->regs + DECON_UPDATE); 518 writel(val, ctx->regs + DECON_UPDATE);
525} 519}
526 520
527static void decon_atomic_flush(struct exynos_drm_crtc *crtc, 521static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
528 struct exynos_drm_plane *plane)
529{ 522{
530 struct decon_context *ctx = crtc->ctx; 523 struct decon_context *ctx = crtc->ctx;
524 int i;
531 525
532 if (ctx->suspended) 526 if (ctx->suspended)
533 return; 527 return;
534 528
535 decon_shadow_protect_win(ctx, plane->zpos, false); 529 for (i = 0; i < WINDOWS_NR; i++)
530 decon_shadow_protect_win(ctx, i, false);
536} 531}
537 532
538static void decon_init(struct decon_context *ctx) 533static void decon_init(struct decon_context *ctx)
@@ -555,39 +550,12 @@ static void decon_init(struct decon_context *ctx)
555static void decon_enable(struct exynos_drm_crtc *crtc) 550static void decon_enable(struct exynos_drm_crtc *crtc)
556{ 551{
557 struct decon_context *ctx = crtc->ctx; 552 struct decon_context *ctx = crtc->ctx;
558 int ret;
559 553
560 if (!ctx->suspended) 554 if (!ctx->suspended)
561 return; 555 return;
562 556
563 ctx->suspended = false;
564
565 pm_runtime_get_sync(ctx->dev); 557 pm_runtime_get_sync(ctx->dev);
566 558
567 ret = clk_prepare_enable(ctx->pclk);
568 if (ret < 0) {
569 DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
570 return;
571 }
572
573 ret = clk_prepare_enable(ctx->aclk);
574 if (ret < 0) {
575 DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
576 return;
577 }
578
579 ret = clk_prepare_enable(ctx->eclk);
580 if (ret < 0) {
581 DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
582 return;
583 }
584
585 ret = clk_prepare_enable(ctx->vclk);
586 if (ret < 0) {
587 DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
588 return;
589 }
590
591 decon_init(ctx); 559 decon_init(ctx);
592 560
593 /* if vblank was enabled status, enable it again. */ 561 /* if vblank was enabled status, enable it again. */
@@ -595,6 +563,8 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
595 decon_enable_vblank(ctx->crtc); 563 decon_enable_vblank(ctx->crtc);
596 564
597 decon_commit(ctx->crtc); 565 decon_commit(ctx->crtc);
566
567 ctx->suspended = false;
598} 568}
599 569
600static void decon_disable(struct exynos_drm_crtc *crtc) 570static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -613,11 +583,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
613 for (i = 0; i < WINDOWS_NR; i++) 583 for (i = 0; i < WINDOWS_NR; i++)
614 decon_disable_plane(crtc, &ctx->planes[i]); 584 decon_disable_plane(crtc, &ctx->planes[i]);
615 585
616 clk_disable_unprepare(ctx->vclk);
617 clk_disable_unprepare(ctx->eclk);
618 clk_disable_unprepare(ctx->aclk);
619 clk_disable_unprepare(ctx->pclk);
620
621 pm_runtime_put_sync(ctx->dev); 586 pm_runtime_put_sync(ctx->dev);
622 587
623 ctx->suspended = true; 588 ctx->suspended = true;
@@ -679,8 +644,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
679 struct decon_context *ctx = dev_get_drvdata(dev); 644 struct decon_context *ctx = dev_get_drvdata(dev);
680 struct drm_device *drm_dev = data; 645 struct drm_device *drm_dev = data;
681 struct exynos_drm_plane *exynos_plane; 646 struct exynos_drm_plane *exynos_plane;
682 enum drm_plane_type type; 647 unsigned int i;
683 unsigned int zpos;
684 int ret; 648 int ret;
685 649
686 ret = decon_ctx_initialize(ctx, drm_dev); 650 ret = decon_ctx_initialize(ctx, drm_dev);
@@ -689,11 +653,14 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
689 return ret; 653 return ret;
690 } 654 }
691 655
692 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 656 for (i = 0; i < WINDOWS_NR; i++) {
693 type = exynos_plane_get_type(zpos, CURSOR_WIN); 657 ctx->configs[i].pixel_formats = decon_formats;
694 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 658 ctx->configs[i].num_pixel_formats = ARRAY_SIZE(decon_formats);
695 1 << ctx->pipe, type, decon_formats, 659 ctx->configs[i].zpos = i;
696 ARRAY_SIZE(decon_formats), zpos); 660 ctx->configs[i].type = decon_win_types[i];
661
662 ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
663 1 << ctx->pipe, &ctx->configs[i]);
697 if (ret) 664 if (ret)
698 return ret; 665 return ret;
699 } 666 }
@@ -843,11 +810,63 @@ static int decon_remove(struct platform_device *pdev)
843 return 0; 810 return 0;
844} 811}
845 812
813#ifdef CONFIG_PM
814static int exynos7_decon_suspend(struct device *dev)
815{
816 struct decon_context *ctx = dev_get_drvdata(dev);
817
818 clk_disable_unprepare(ctx->vclk);
819 clk_disable_unprepare(ctx->eclk);
820 clk_disable_unprepare(ctx->aclk);
821 clk_disable_unprepare(ctx->pclk);
822
823 return 0;
824}
825
826static int exynos7_decon_resume(struct device *dev)
827{
828 struct decon_context *ctx = dev_get_drvdata(dev);
829 int ret;
830
831 ret = clk_prepare_enable(ctx->pclk);
832 if (ret < 0) {
833 DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
834 return ret;
835 }
836
837 ret = clk_prepare_enable(ctx->aclk);
838 if (ret < 0) {
839 DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
840 return ret;
841 }
842
843 ret = clk_prepare_enable(ctx->eclk);
844 if (ret < 0) {
845 DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
846 return ret;
847 }
848
849 ret = clk_prepare_enable(ctx->vclk);
850 if (ret < 0) {
851 DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
852 return ret;
853 }
854
855 return 0;
856}
857#endif
858
859static const struct dev_pm_ops exynos7_decon_pm_ops = {
860 SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
861 NULL)
862};
863
846struct platform_driver decon_driver = { 864struct platform_driver decon_driver = {
847 .probe = decon_probe, 865 .probe = decon_probe,
848 .remove = decon_remove, 866 .remove = decon_remove,
849 .driver = { 867 .driver = {
850 .name = "exynos-decon", 868 .name = "exynos-decon",
869 .pm = &exynos7_decon_pm_ops,
851 .of_match_table = decon_driver_dt_match, 870 .of_match_table = decon_driver_dt_match,
852 }, 871 },
853}; 872};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 124fb9a56f02..b79c316c2ad2 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -953,7 +953,7 @@ static void exynos_dp_connector_destroy(struct drm_connector *connector)
953 drm_connector_cleanup(connector); 953 drm_connector_cleanup(connector);
954} 954}
955 955
956static struct drm_connector_funcs exynos_dp_connector_funcs = { 956static const struct drm_connector_funcs exynos_dp_connector_funcs = {
957 .dpms = drm_atomic_helper_connector_dpms, 957 .dpms = drm_atomic_helper_connector_dpms,
958 .fill_modes = drm_helper_probe_single_connector_modes, 958 .fill_modes = drm_helper_probe_single_connector_modes,
959 .detect = exynos_dp_detect, 959 .detect = exynos_dp_detect,
@@ -998,7 +998,7 @@ static struct drm_encoder *exynos_dp_best_encoder(
998 return &dp->encoder; 998 return &dp->encoder;
999} 999}
1000 1000
1001static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { 1001static const struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
1002 .get_modes = exynos_dp_get_modes, 1002 .get_modes = exynos_dp_get_modes,
1003 .best_encoder = exynos_dp_best_encoder, 1003 .best_encoder = exynos_dp_best_encoder,
1004}; 1004};
@@ -1009,9 +1009,9 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
1009{ 1009{
1010 int ret; 1010 int ret;
1011 1011
1012 encoder->bridge = dp->bridge; 1012 encoder->bridge->next = dp->ptn_bridge;
1013 dp->bridge->encoder = encoder; 1013 dp->ptn_bridge->encoder = encoder;
1014 ret = drm_bridge_attach(encoder->dev, dp->bridge); 1014 ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
1015 if (ret) { 1015 if (ret) {
1016 DRM_ERROR("Failed to attach bridge to drm\n"); 1016 DRM_ERROR("Failed to attach bridge to drm\n");
1017 return ret; 1017 return ret;
@@ -1020,14 +1020,15 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
1020 return 0; 1020 return 0;
1021} 1021}
1022 1022
1023static int exynos_dp_create_connector(struct drm_encoder *encoder) 1023static int exynos_dp_bridge_attach(struct drm_bridge *bridge)
1024{ 1024{
1025 struct exynos_dp_device *dp = encoder_to_dp(encoder); 1025 struct exynos_dp_device *dp = bridge->driver_private;
1026 struct drm_encoder *encoder = &dp->encoder;
1026 struct drm_connector *connector = &dp->connector; 1027 struct drm_connector *connector = &dp->connector;
1027 int ret; 1028 int ret;
1028 1029
1029 /* Pre-empt DP connector creation if there's a bridge */ 1030 /* Pre-empt DP connector creation if there's a bridge */
1030 if (dp->bridge) { 1031 if (dp->ptn_bridge) {
1031 ret = exynos_drm_attach_lcd_bridge(dp, encoder); 1032 ret = exynos_drm_attach_lcd_bridge(dp, encoder);
1032 if (!ret) 1033 if (!ret)
1033 return 0; 1034 return 0;
@@ -1052,27 +1053,16 @@ static int exynos_dp_create_connector(struct drm_encoder *encoder)
1052 return ret; 1053 return ret;
1053} 1054}
1054 1055
1055static bool exynos_dp_mode_fixup(struct drm_encoder *encoder, 1056static void exynos_dp_bridge_enable(struct drm_bridge *bridge)
1056 const struct drm_display_mode *mode,
1057 struct drm_display_mode *adjusted_mode)
1058{
1059 return true;
1060}
1061
1062static void exynos_dp_mode_set(struct drm_encoder *encoder,
1063 struct drm_display_mode *mode,
1064 struct drm_display_mode *adjusted_mode)
1065{
1066}
1067
1068static void exynos_dp_enable(struct drm_encoder *encoder)
1069{ 1057{
1070 struct exynos_dp_device *dp = encoder_to_dp(encoder); 1058 struct exynos_dp_device *dp = bridge->driver_private;
1071 struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1059 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1072 1060
1073 if (dp->dpms_mode == DRM_MODE_DPMS_ON) 1061 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1074 return; 1062 return;
1075 1063
1064 pm_runtime_get_sync(dp->dev);
1065
1076 if (dp->panel) { 1066 if (dp->panel) {
1077 if (drm_panel_prepare(dp->panel)) { 1067 if (drm_panel_prepare(dp->panel)) {
1078 DRM_ERROR("failed to setup the panel\n"); 1068 DRM_ERROR("failed to setup the panel\n");
@@ -1083,7 +1073,6 @@ static void exynos_dp_enable(struct drm_encoder *encoder)
1083 if (crtc->ops->clock_enable) 1073 if (crtc->ops->clock_enable)
1084 crtc->ops->clock_enable(dp_to_crtc(dp), true); 1074 crtc->ops->clock_enable(dp_to_crtc(dp), true);
1085 1075
1086 clk_prepare_enable(dp->clock);
1087 phy_power_on(dp->phy); 1076 phy_power_on(dp->phy);
1088 exynos_dp_init_dp(dp); 1077 exynos_dp_init_dp(dp);
1089 enable_irq(dp->irq); 1078 enable_irq(dp->irq);
@@ -1092,9 +1081,9 @@ static void exynos_dp_enable(struct drm_encoder *encoder)
1092 dp->dpms_mode = DRM_MODE_DPMS_ON; 1081 dp->dpms_mode = DRM_MODE_DPMS_ON;
1093} 1082}
1094 1083
1095static void exynos_dp_disable(struct drm_encoder *encoder) 1084static void exynos_dp_bridge_disable(struct drm_bridge *bridge)
1096{ 1085{
1097 struct exynos_dp_device *dp = encoder_to_dp(encoder); 1086 struct exynos_dp_device *dp = bridge->driver_private;
1098 struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1087 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1099 1088
1100 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1089 if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1110,7 +1099,6 @@ static void exynos_dp_disable(struct drm_encoder *encoder)
1110 disable_irq(dp->irq); 1099 disable_irq(dp->irq);
1111 flush_work(&dp->hotplug_work); 1100 flush_work(&dp->hotplug_work);
1112 phy_power_off(dp->phy); 1101 phy_power_off(dp->phy);
1113 clk_disable_unprepare(dp->clock);
1114 1102
1115 if (crtc->ops->clock_enable) 1103 if (crtc->ops->clock_enable)
1116 crtc->ops->clock_enable(dp_to_crtc(dp), false); 1104 crtc->ops->clock_enable(dp_to_crtc(dp), false);
@@ -1120,17 +1108,82 @@ static void exynos_dp_disable(struct drm_encoder *encoder)
1120 DRM_ERROR("failed to turnoff the panel\n"); 1108 DRM_ERROR("failed to turnoff the panel\n");
1121 } 1109 }
1122 1110
1111 pm_runtime_put_sync(dp->dev);
1112
1123 dp->dpms_mode = DRM_MODE_DPMS_OFF; 1113 dp->dpms_mode = DRM_MODE_DPMS_OFF;
1124} 1114}
1125 1115
1126static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { 1116static void exynos_dp_bridge_nop(struct drm_bridge *bridge)
1117{
1118 /* do nothing */
1119}
1120
1121static const struct drm_bridge_funcs exynos_dp_bridge_funcs = {
1122 .enable = exynos_dp_bridge_enable,
1123 .disable = exynos_dp_bridge_disable,
1124 .pre_enable = exynos_dp_bridge_nop,
1125 .post_disable = exynos_dp_bridge_nop,
1126 .attach = exynos_dp_bridge_attach,
1127};
1128
1129static int exynos_dp_create_connector(struct drm_encoder *encoder)
1130{
1131 struct exynos_dp_device *dp = encoder_to_dp(encoder);
1132 struct drm_device *drm_dev = dp->drm_dev;
1133 struct drm_bridge *bridge;
1134 int ret;
1135
1136 bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
1137 if (!bridge) {
1138 DRM_ERROR("failed to allocate for drm bridge\n");
1139 return -ENOMEM;
1140 }
1141
1142 dp->bridge = bridge;
1143
1144 encoder->bridge = bridge;
1145 bridge->driver_private = dp;
1146 bridge->encoder = encoder;
1147 bridge->funcs = &exynos_dp_bridge_funcs;
1148
1149 ret = drm_bridge_attach(drm_dev, bridge);
1150 if (ret) {
1151 DRM_ERROR("failed to attach drm bridge\n");
1152 return -EINVAL;
1153 }
1154
1155 return 0;
1156}
1157
1158static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
1159 const struct drm_display_mode *mode,
1160 struct drm_display_mode *adjusted_mode)
1161{
1162 return true;
1163}
1164
1165static void exynos_dp_mode_set(struct drm_encoder *encoder,
1166 struct drm_display_mode *mode,
1167 struct drm_display_mode *adjusted_mode)
1168{
1169}
1170
1171static void exynos_dp_enable(struct drm_encoder *encoder)
1172{
1173}
1174
1175static void exynos_dp_disable(struct drm_encoder *encoder)
1176{
1177}
1178
1179static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
1127 .mode_fixup = exynos_dp_mode_fixup, 1180 .mode_fixup = exynos_dp_mode_fixup,
1128 .mode_set = exynos_dp_mode_set, 1181 .mode_set = exynos_dp_mode_set,
1129 .enable = exynos_dp_enable, 1182 .enable = exynos_dp_enable,
1130 .disable = exynos_dp_disable, 1183 .disable = exynos_dp_disable,
1131}; 1184};
1132 1185
1133static struct drm_encoder_funcs exynos_dp_encoder_funcs = { 1186static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
1134 .destroy = drm_encoder_cleanup, 1187 .destroy = drm_encoder_cleanup,
1135}; 1188};
1136 1189
@@ -1238,7 +1291,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1238 } 1291 }
1239 } 1292 }
1240 1293
1241 if (!dp->panel && !dp->bridge) { 1294 if (!dp->panel && !dp->ptn_bridge) {
1242 ret = exynos_dp_dt_parse_panel(dp); 1295 ret = exynos_dp_dt_parse_panel(dp);
1243 if (ret) 1296 if (ret)
1244 return ret; 1297 return ret;
@@ -1289,10 +1342,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1289 1342
1290 INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); 1343 INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
1291 1344
1292 phy_power_on(dp->phy);
1293
1294 exynos_dp_init_dp(dp);
1295
1296 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 1345 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
1297 irq_flags, "exynos-dp", dp); 1346 irq_flags, "exynos-dp", dp);
1298 if (ret) { 1347 if (ret) {
@@ -1313,7 +1362,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1313 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1362 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1314 1363
1315 drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, 1364 drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
1316 DRM_MODE_ENCODER_TMDS); 1365 DRM_MODE_ENCODER_TMDS, NULL);
1317 1366
1318 drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); 1367 drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
1319 1368
@@ -1343,8 +1392,9 @@ static const struct component_ops exynos_dp_ops = {
1343static int exynos_dp_probe(struct platform_device *pdev) 1392static int exynos_dp_probe(struct platform_device *pdev)
1344{ 1393{
1345 struct device *dev = &pdev->dev; 1394 struct device *dev = &pdev->dev;
1346 struct device_node *panel_node, *bridge_node, *endpoint; 1395 struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
1347 struct exynos_dp_device *dp; 1396 struct exynos_dp_device *dp;
1397 int ret;
1348 1398
1349 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), 1399 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
1350 GFP_KERNEL); 1400 GFP_KERNEL);
@@ -1353,36 +1403,96 @@ static int exynos_dp_probe(struct platform_device *pdev)
1353 1403
1354 platform_set_drvdata(pdev, dp); 1404 platform_set_drvdata(pdev, dp);
1355 1405
1406 /* This is for the backward compatibility. */
1356 panel_node = of_parse_phandle(dev->of_node, "panel", 0); 1407 panel_node = of_parse_phandle(dev->of_node, "panel", 0);
1357 if (panel_node) { 1408 if (panel_node) {
1358 dp->panel = of_drm_find_panel(panel_node); 1409 dp->panel = of_drm_find_panel(panel_node);
1359 of_node_put(panel_node); 1410 of_node_put(panel_node);
1360 if (!dp->panel) 1411 if (!dp->panel)
1361 return -EPROBE_DEFER; 1412 return -EPROBE_DEFER;
1413 } else {
1414 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1415 if (endpoint) {
1416 panel_node = of_graph_get_remote_port_parent(endpoint);
1417 if (panel_node) {
1418 dp->panel = of_drm_find_panel(panel_node);
1419 of_node_put(panel_node);
1420 if (!dp->panel)
1421 return -EPROBE_DEFER;
1422 } else {
1423 DRM_ERROR("no port node for panel device.\n");
1424 return -EINVAL;
1425 }
1426 }
1362 } 1427 }
1363 1428
1429 if (endpoint)
1430 goto out;
1431
1364 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 1432 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1365 if (endpoint) { 1433 if (endpoint) {
1366 bridge_node = of_graph_get_remote_port_parent(endpoint); 1434 bridge_node = of_graph_get_remote_port_parent(endpoint);
1367 if (bridge_node) { 1435 if (bridge_node) {
1368 dp->bridge = of_drm_find_bridge(bridge_node); 1436 dp->ptn_bridge = of_drm_find_bridge(bridge_node);
1369 of_node_put(bridge_node); 1437 of_node_put(bridge_node);
1370 if (!dp->bridge) 1438 if (!dp->ptn_bridge)
1371 return -EPROBE_DEFER; 1439 return -EPROBE_DEFER;
1372 } else 1440 } else
1373 return -EPROBE_DEFER; 1441 return -EPROBE_DEFER;
1374 } 1442 }
1375 1443
1376 return component_add(&pdev->dev, &exynos_dp_ops); 1444out:
1445 pm_runtime_enable(dev);
1446
1447 ret = component_add(&pdev->dev, &exynos_dp_ops);
1448 if (ret)
1449 goto err_disable_pm_runtime;
1450
1451 return ret;
1452
1453err_disable_pm_runtime:
1454 pm_runtime_disable(dev);
1455
1456 return ret;
1377} 1457}
1378 1458
1379static int exynos_dp_remove(struct platform_device *pdev) 1459static int exynos_dp_remove(struct platform_device *pdev)
1380{ 1460{
1461 pm_runtime_disable(&pdev->dev);
1381 component_del(&pdev->dev, &exynos_dp_ops); 1462 component_del(&pdev->dev, &exynos_dp_ops);
1382 1463
1383 return 0; 1464 return 0;
1384} 1465}
1385 1466
1467#ifdef CONFIG_PM
1468static int exynos_dp_suspend(struct device *dev)
1469{
1470 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1471
1472 clk_disable_unprepare(dp->clock);
1473
1474 return 0;
1475}
1476
1477static int exynos_dp_resume(struct device *dev)
1478{
1479 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1480 int ret;
1481
1482 ret = clk_prepare_enable(dp->clock);
1483 if (ret < 0) {
1484 DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
1485 return ret;
1486 }
1487
1488 return 0;
1489}
1490#endif
1491
1492static const struct dev_pm_ops exynos_dp_pm_ops = {
1493 SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
1494};
1495
1386static const struct of_device_id exynos_dp_match[] = { 1496static const struct of_device_id exynos_dp_match[] = {
1387 { .compatible = "samsung,exynos5-dp" }, 1497 { .compatible = "samsung,exynos5-dp" },
1388 {}, 1498 {},
@@ -1395,6 +1505,7 @@ struct platform_driver dp_driver = {
1395 .driver = { 1505 .driver = {
1396 .name = "exynos-dp", 1506 .name = "exynos-dp",
1397 .owner = THIS_MODULE, 1507 .owner = THIS_MODULE,
1508 .pm = &exynos_dp_pm_ops,
1398 .of_match_table = exynos_dp_match, 1509 .of_match_table = exynos_dp_match,
1399 }, 1510 },
1400}; 1511};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index e413b6f7b0e7..66eec4b2d5c6 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -153,6 +153,7 @@ struct exynos_dp_device {
153 struct drm_connector connector; 153 struct drm_connector connector;
154 struct drm_panel *panel; 154 struct drm_panel *panel;
155 struct drm_bridge *bridge; 155 struct drm_bridge *bridge;
156 struct drm_bridge *ptn_bridge;
156 struct clk *clock; 157 struct clk *clock;
157 unsigned int irq; 158 unsigned int irq;
158 void __iomem *reg_base; 159 void __iomem *reg_base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b3ba27fd9a6b..e36579c1c025 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
55{ 55{
56 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 56 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
57 57
58 if (!state->enable)
59 return 0;
60
58 if (exynos_crtc->ops->atomic_check) 61 if (exynos_crtc->ops->atomic_check)
59 return exynos_crtc->ops->atomic_check(exynos_crtc, state); 62 return exynos_crtc->ops->atomic_check(exynos_crtc, state);
60 63
@@ -65,35 +68,23 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
65 struct drm_crtc_state *old_crtc_state) 68 struct drm_crtc_state *old_crtc_state)
66{ 69{
67 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 70 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
68 struct drm_plane *plane;
69 71
70 exynos_crtc->event = crtc->state->event; 72 exynos_crtc->event = crtc->state->event;
71 73
72 drm_atomic_crtc_for_each_plane(plane, crtc) { 74 if (exynos_crtc->ops->atomic_begin)
73 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 75 exynos_crtc->ops->atomic_begin(exynos_crtc);
74
75 if (exynos_crtc->ops->atomic_begin)
76 exynos_crtc->ops->atomic_begin(exynos_crtc,
77 exynos_plane);
78 }
79} 76}
80 77
81static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, 78static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
82 struct drm_crtc_state *old_crtc_state) 79 struct drm_crtc_state *old_crtc_state)
83{ 80{
84 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 81 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
85 struct drm_plane *plane;
86
87 drm_atomic_crtc_for_each_plane(plane, crtc) {
88 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
89 82
90 if (exynos_crtc->ops->atomic_flush) 83 if (exynos_crtc->ops->atomic_flush)
91 exynos_crtc->ops->atomic_flush(exynos_crtc, 84 exynos_crtc->ops->atomic_flush(exynos_crtc);
92 exynos_plane);
93 }
94} 85}
95 86
96static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { 87static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
97 .enable = exynos_drm_crtc_enable, 88 .enable = exynos_drm_crtc_enable,
98 .disable = exynos_drm_crtc_disable, 89 .disable = exynos_drm_crtc_disable,
99 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, 90 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
@@ -113,7 +104,7 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
113 kfree(exynos_crtc); 104 kfree(exynos_crtc);
114} 105}
115 106
116static struct drm_crtc_funcs exynos_crtc_funcs = { 107static const struct drm_crtc_funcs exynos_crtc_funcs = {
117 .set_config = drm_atomic_helper_set_config, 108 .set_config = drm_atomic_helper_set_config,
118 .page_flip = drm_atomic_helper_page_flip, 109 .page_flip = drm_atomic_helper_page_flip,
119 .destroy = exynos_drm_crtc_destroy, 110 .destroy = exynos_drm_crtc_destroy,
@@ -150,7 +141,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
150 private->crtc[pipe] = crtc; 141 private->crtc[pipe] = crtc;
151 142
152 ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL, 143 ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL,
153 &exynos_crtc_funcs); 144 &exynos_crtc_funcs, NULL);
154 if (ret < 0) 145 if (ret < 0)
155 goto err_crtc; 146 goto err_crtc;
156 147
@@ -212,29 +203,6 @@ void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
212 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 203 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
213} 204}
214 205
215void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
216{
217 struct exynos_drm_crtc *exynos_crtc;
218 struct drm_device *dev = fb->dev;
219 struct drm_crtc *crtc;
220
221 /*
222 * make sure that overlay data are updated to real hardware
223 * for all encoders.
224 */
225 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
226 exynos_crtc = to_exynos_crtc(crtc);
227
228 /*
229 * wait for vblank interrupt
230 * - this makes sure that overlay data are updated to
231 * real hardware.
232 */
233 if (exynos_crtc->ops->wait_for_vblank)
234 exynos_crtc->ops->wait_for_vblank(exynos_crtc);
235 }
236}
237
238int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, 206int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
239 enum exynos_drm_output_type out_type) 207 enum exynos_drm_output_type out_type)
240{ 208{
@@ -258,3 +226,29 @@ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
258 if (exynos_crtc->ops->te_handler) 226 if (exynos_crtc->ops->te_handler)
259 exynos_crtc->ops->te_handler(exynos_crtc); 227 exynos_crtc->ops->te_handler(exynos_crtc);
260} 228}
229
230void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc,
231 struct drm_file *file)
232{
233 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
234 struct drm_pending_vblank_event *e;
235 unsigned long flags;
236
237 spin_lock_irqsave(&crtc->dev->event_lock, flags);
238 e = exynos_crtc->event;
239 if (e && e->base.file_priv == file) {
240 exynos_crtc->event = NULL;
241 /*
242 * event will be destroyed by core part
243 * so below line should be removed later with core changes
244 */
245 e->base.destroy(&e->base);
246 /*
247 * event_space will be increased by core part
248 * so below line should be removed later with core changes.
249 */
250 file->event_space += sizeof(e->event);
251 atomic_dec(&exynos_crtc->pending_update);
252 }
253 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
254}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index f9f365bd0257..cfdcf3e4eb1b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -28,7 +28,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
28void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc); 28void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
29void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, 29void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
30 struct exynos_drm_plane *exynos_plane); 30 struct exynos_drm_plane *exynos_plane);
31void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
32 31
33/* This function gets pipe value to crtc device matched with out_type. */ 32/* This function gets pipe value to crtc device matched with out_type. */
34int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, 33int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
@@ -41,4 +40,8 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
41 */ 40 */
42void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); 41void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
43 42
43/* This function cancels a page flip request. */
44void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc,
45 struct drm_file *file);
46
44#endif 47#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index c748b8790de3..05350ae0785b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -57,7 +57,7 @@ static void exynos_dpi_connector_destroy(struct drm_connector *connector)
57 drm_connector_cleanup(connector); 57 drm_connector_cleanup(connector);
58} 58}
59 59
60static struct drm_connector_funcs exynos_dpi_connector_funcs = { 60static const struct drm_connector_funcs exynos_dpi_connector_funcs = {
61 .dpms = drm_atomic_helper_connector_dpms, 61 .dpms = drm_atomic_helper_connector_dpms,
62 .detect = exynos_dpi_detect, 62 .detect = exynos_dpi_detect,
63 .fill_modes = drm_helper_probe_single_connector_modes, 63 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -100,7 +100,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
100 return &ctx->encoder; 100 return &ctx->encoder;
101} 101}
102 102
103static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { 103static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
104 .get_modes = exynos_dpi_get_modes, 104 .get_modes = exynos_dpi_get_modes,
105 .best_encoder = exynos_dpi_best_encoder, 105 .best_encoder = exynos_dpi_best_encoder,
106}; 106};
@@ -161,14 +161,14 @@ static void exynos_dpi_disable(struct drm_encoder *encoder)
161 } 161 }
162} 162}
163 163
164static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = { 164static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
165 .mode_fixup = exynos_dpi_mode_fixup, 165 .mode_fixup = exynos_dpi_mode_fixup,
166 .mode_set = exynos_dpi_mode_set, 166 .mode_set = exynos_dpi_mode_set,
167 .enable = exynos_dpi_enable, 167 .enable = exynos_dpi_enable,
168 .disable = exynos_dpi_disable, 168 .disable = exynos_dpi_disable,
169}; 169};
170 170
171static struct drm_encoder_funcs exynos_dpi_encoder_funcs = { 171static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
172 .destroy = drm_encoder_cleanup, 172 .destroy = drm_encoder_cleanup,
173}; 173};
174 174
@@ -309,7 +309,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
309 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 309 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
310 310
311 drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs, 311 drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
312 DRM_MODE_ENCODER_TMDS); 312 DRM_MODE_ENCODER_TMDS, NULL);
313 313
314 drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs); 314 drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
315 315
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2c6019d6a205..68f0f36f6e7e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,45 +304,6 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
304 return 0; 304 return 0;
305} 305}
306 306
307#ifdef CONFIG_PM_SLEEP
308static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
309{
310 struct drm_connector *connector;
311
312 drm_modeset_lock_all(dev);
313 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
314 int old_dpms = connector->dpms;
315
316 if (connector->funcs->dpms)
317 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
318
319 /* Set the old mode back to the connector for resume */
320 connector->dpms = old_dpms;
321 }
322 drm_modeset_unlock_all(dev);
323
324 return 0;
325}
326
327static int exynos_drm_resume(struct drm_device *dev)
328{
329 struct drm_connector *connector;
330
331 drm_modeset_lock_all(dev);
332 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
333 if (connector->funcs->dpms) {
334 int dpms = connector->dpms;
335
336 connector->dpms = DRM_MODE_DPMS_OFF;
337 connector->funcs->dpms(connector, dpms);
338 }
339 }
340 drm_modeset_unlock_all(dev);
341
342 return 0;
343}
344#endif
345
346static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 307static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
347{ 308{
348 struct drm_exynos_file_private *file_priv; 309 struct drm_exynos_file_private *file_priv;
@@ -369,7 +330,12 @@ err_file_priv_free:
369static void exynos_drm_preclose(struct drm_device *dev, 330static void exynos_drm_preclose(struct drm_device *dev,
370 struct drm_file *file) 331 struct drm_file *file)
371{ 332{
333 struct drm_crtc *crtc;
334
372 exynos_drm_subdrv_close(dev, file); 335 exynos_drm_subdrv_close(dev, file);
336
337 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
338 exynos_drm_crtc_cancel_page_flip(crtc, file);
373} 339}
374 340
375static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) 341static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
@@ -476,31 +442,54 @@ static struct drm_driver exynos_drm_driver = {
476}; 442};
477 443
478#ifdef CONFIG_PM_SLEEP 444#ifdef CONFIG_PM_SLEEP
479static int exynos_drm_sys_suspend(struct device *dev) 445static int exynos_drm_suspend(struct device *dev)
480{ 446{
481 struct drm_device *drm_dev = dev_get_drvdata(dev); 447 struct drm_device *drm_dev = dev_get_drvdata(dev);
482 pm_message_t message; 448 struct drm_connector *connector;
483 449
484 if (pm_runtime_suspended(dev) || !drm_dev) 450 if (pm_runtime_suspended(dev) || !drm_dev)
485 return 0; 451 return 0;
486 452
487 message.event = PM_EVENT_SUSPEND; 453 drm_modeset_lock_all(drm_dev);
488 return exynos_drm_suspend(drm_dev, message); 454 drm_for_each_connector(connector, drm_dev) {
455 int old_dpms = connector->dpms;
456
457 if (connector->funcs->dpms)
458 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
459
460 /* Set the old mode back to the connector for resume */
461 connector->dpms = old_dpms;
462 }
463 drm_modeset_unlock_all(drm_dev);
464
465 return 0;
489} 466}
490 467
491static int exynos_drm_sys_resume(struct device *dev) 468static int exynos_drm_resume(struct device *dev)
492{ 469{
493 struct drm_device *drm_dev = dev_get_drvdata(dev); 470 struct drm_device *drm_dev = dev_get_drvdata(dev);
471 struct drm_connector *connector;
494 472
495 if (pm_runtime_suspended(dev) || !drm_dev) 473 if (pm_runtime_suspended(dev) || !drm_dev)
496 return 0; 474 return 0;
497 475
498 return exynos_drm_resume(drm_dev); 476 drm_modeset_lock_all(drm_dev);
477 drm_for_each_connector(connector, drm_dev) {
478 if (connector->funcs->dpms) {
479 int dpms = connector->dpms;
480
481 connector->dpms = DRM_MODE_DPMS_OFF;
482 connector->funcs->dpms(connector, dpms);
483 }
484 }
485 drm_modeset_unlock_all(drm_dev);
486
487 return 0;
499} 488}
500#endif 489#endif
501 490
502static const struct dev_pm_ops exynos_drm_pm_ops = { 491static const struct dev_pm_ops exynos_drm_pm_ops = {
503 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume) 492 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
504}; 493};
505 494
506/* forward declaration */ 495/* forward declaration */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index f1eda7fa4e3c..17b5ded72ff1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -38,25 +38,46 @@ enum exynos_drm_output_type {
38 EXYNOS_DISPLAY_TYPE_VIDI, 38 EXYNOS_DISPLAY_TYPE_VIDI,
39}; 39};
40 40
41struct exynos_drm_rect {
42 unsigned int x, y;
43 unsigned int w, h;
44};
45
41/* 46/*
42 * Exynos drm common overlay structure. 47 * Exynos drm plane state structure.
43 * 48 *
44 * @base: plane object 49 * @base: plane_state object (contains drm_framebuffer pointer)
45 * @src_x: offset x on a framebuffer to be displayed. 50 * @src: rectangle of the source image data to be displayed (clipped to
46 * - the unit is screen coordinates. 51 * visible part).
47 * @src_y: offset y on a framebuffer to be displayed. 52 * @crtc: rectangle of the target image position on hardware screen
48 * - the unit is screen coordinates. 53 * (clipped to visible part).
49 * @src_w: width of a partial image to be displayed from framebuffer.
50 * @src_h: height of a partial image to be displayed from framebuffer.
51 * @crtc_x: offset x on hardware screen.
52 * @crtc_y: offset y on hardware screen.
53 * @crtc_w: window width to be displayed (hardware screen).
54 * @crtc_h: window height to be displayed (hardware screen).
55 * @h_ratio: horizontal scaling ratio, 16.16 fixed point 54 * @h_ratio: horizontal scaling ratio, 16.16 fixed point
56 * @v_ratio: vertical scaling ratio, 16.16 fixed point 55 * @v_ratio: vertical scaling ratio, 16.16 fixed point
57 * @dma_addr: array of bus(accessed by dma) address to the memory region 56 *
58 * allocated for a overlay. 57 * this structure consists plane state data that will be applied to hardware
59 * @zpos: order of overlay layer(z position). 58 * specific overlay info.
59 */
60
61struct exynos_drm_plane_state {
62 struct drm_plane_state base;
63 struct exynos_drm_rect crtc;
64 struct exynos_drm_rect src;
65 unsigned int h_ratio;
66 unsigned int v_ratio;
67 unsigned int zpos;
68};
69
70static inline struct exynos_drm_plane_state *
71to_exynos_plane_state(struct drm_plane_state *state)
72{
73 return container_of(state, struct exynos_drm_plane_state, base);
74}
75
76/*
77 * Exynos drm common overlay structure.
78 *
79 * @base: plane object
80 * @index: hardware index of the overlay layer
60 * 81 *
61 * this structure is common to exynos SoC and its contents would be copied 82 * this structure is common to exynos SoC and its contents would be copied
62 * to hardware specific overlay info. 83 * to hardware specific overlay info.
@@ -64,21 +85,33 @@ enum exynos_drm_output_type {
64 85
65struct exynos_drm_plane { 86struct exynos_drm_plane {
66 struct drm_plane base; 87 struct drm_plane base;
67 unsigned int src_x; 88 const struct exynos_drm_plane_config *config;
68 unsigned int src_y; 89 unsigned int index;
69 unsigned int src_w;
70 unsigned int src_h;
71 unsigned int crtc_x;
72 unsigned int crtc_y;
73 unsigned int crtc_w;
74 unsigned int crtc_h;
75 unsigned int h_ratio;
76 unsigned int v_ratio;
77 dma_addr_t dma_addr[MAX_FB_BUFFER];
78 unsigned int zpos;
79 struct drm_framebuffer *pending_fb; 90 struct drm_framebuffer *pending_fb;
80}; 91};
81 92
93#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
94#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
95#define EXYNOS_DRM_PLANE_CAP_ZPOS (1 << 2)
96
97/*
98 * Exynos DRM plane configuration structure.
99 *
100 * @zpos: initial z-position of the plane.
101 * @type: type of the plane (primary, cursor or overlay).
102 * @pixel_formats: supported pixel formats.
103 * @num_pixel_formats: number of elements in 'pixel_formats'.
104 * @capabilities: supported features (see EXYNOS_DRM_PLANE_CAP_*)
105 */
106
107struct exynos_drm_plane_config {
108 unsigned int zpos;
109 enum drm_plane_type type;
110 const uint32_t *pixel_formats;
111 unsigned int num_pixel_formats;
112 unsigned int capabilities;
113};
114
82/* 115/*
83 * Exynos drm crtc ops 116 * Exynos drm crtc ops
84 * 117 *
@@ -90,8 +123,8 @@ struct exynos_drm_plane {
90 * @wait_for_vblank: wait for vblank interrupt to make sure that 123 * @wait_for_vblank: wait for vblank interrupt to make sure that
91 * hardware overlay is updated. 124 * hardware overlay is updated.
92 * @atomic_check: validate state 125 * @atomic_check: validate state
93 * @atomic_begin: prepare a window to receive a update 126 * @atomic_begin: prepare device to receive an update
94 * @atomic_flush: mark the end of a window update 127 * @atomic_flush: mark the end of device update
95 * @update_plane: apply hardware specific overlay data to registers. 128 * @update_plane: apply hardware specific overlay data to registers.
96 * @disable_plane: disable hardware specific overlay. 129 * @disable_plane: disable hardware specific overlay.
97 * @te_handler: trigger to transfer video image at the tearing effect 130 * @te_handler: trigger to transfer video image at the tearing effect
@@ -111,14 +144,12 @@ struct exynos_drm_crtc_ops {
111 void (*wait_for_vblank)(struct exynos_drm_crtc *crtc); 144 void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
112 int (*atomic_check)(struct exynos_drm_crtc *crtc, 145 int (*atomic_check)(struct exynos_drm_crtc *crtc,
113 struct drm_crtc_state *state); 146 struct drm_crtc_state *state);
114 void (*atomic_begin)(struct exynos_drm_crtc *crtc, 147 void (*atomic_begin)(struct exynos_drm_crtc *crtc);
115 struct exynos_drm_plane *plane);
116 void (*update_plane)(struct exynos_drm_crtc *crtc, 148 void (*update_plane)(struct exynos_drm_crtc *crtc,
117 struct exynos_drm_plane *plane); 149 struct exynos_drm_plane *plane);
118 void (*disable_plane)(struct exynos_drm_crtc *crtc, 150 void (*disable_plane)(struct exynos_drm_crtc *crtc,
119 struct exynos_drm_plane *plane); 151 struct exynos_drm_plane *plane);
120 void (*atomic_flush)(struct exynos_drm_crtc *crtc, 152 void (*atomic_flush)(struct exynos_drm_crtc *crtc);
121 struct exynos_drm_plane *plane);
122 void (*te_handler)(struct exynos_drm_crtc *crtc); 153 void (*te_handler)(struct exynos_drm_crtc *crtc);
123 void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable); 154 void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
124}; 155};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 12b03b364703..d84a498ef099 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1458,66 +1458,6 @@ static const struct mipi_dsi_host_ops exynos_dsi_ops = {
1458 .transfer = exynos_dsi_host_transfer, 1458 .transfer = exynos_dsi_host_transfer,
1459}; 1459};
1460 1460
1461static int exynos_dsi_poweron(struct exynos_dsi *dsi)
1462{
1463 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1464 int ret, i;
1465
1466 ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1467 if (ret < 0) {
1468 dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
1469 return ret;
1470 }
1471
1472 for (i = 0; i < driver_data->num_clks; i++) {
1473 ret = clk_prepare_enable(dsi->clks[i]);
1474 if (ret < 0)
1475 goto err_clk;
1476 }
1477
1478 ret = phy_power_on(dsi->phy);
1479 if (ret < 0) {
1480 dev_err(dsi->dev, "cannot enable phy %d\n", ret);
1481 goto err_clk;
1482 }
1483
1484 return 0;
1485
1486err_clk:
1487 while (--i > -1)
1488 clk_disable_unprepare(dsi->clks[i]);
1489 regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1490
1491 return ret;
1492}
1493
1494static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
1495{
1496 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1497 int ret, i;
1498
1499 usleep_range(10000, 20000);
1500
1501 if (dsi->state & DSIM_STATE_INITIALIZED) {
1502 dsi->state &= ~DSIM_STATE_INITIALIZED;
1503
1504 exynos_dsi_disable_clock(dsi);
1505
1506 exynos_dsi_disable_irq(dsi);
1507 }
1508
1509 dsi->state &= ~DSIM_STATE_CMD_LPM;
1510
1511 phy_power_off(dsi->phy);
1512
1513 for (i = driver_data->num_clks - 1; i > -1; i--)
1514 clk_disable_unprepare(dsi->clks[i]);
1515
1516 ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1517 if (ret < 0)
1518 dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
1519}
1520
1521static void exynos_dsi_enable(struct drm_encoder *encoder) 1461static void exynos_dsi_enable(struct drm_encoder *encoder)
1522{ 1462{
1523 struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1463 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1526,16 +1466,14 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
1526 if (dsi->state & DSIM_STATE_ENABLED) 1466 if (dsi->state & DSIM_STATE_ENABLED)
1527 return; 1467 return;
1528 1468
1529 ret = exynos_dsi_poweron(dsi); 1469 pm_runtime_get_sync(dsi->dev);
1530 if (ret < 0)
1531 return;
1532 1470
1533 dsi->state |= DSIM_STATE_ENABLED; 1471 dsi->state |= DSIM_STATE_ENABLED;
1534 1472
1535 ret = drm_panel_prepare(dsi->panel); 1473 ret = drm_panel_prepare(dsi->panel);
1536 if (ret < 0) { 1474 if (ret < 0) {
1537 dsi->state &= ~DSIM_STATE_ENABLED; 1475 dsi->state &= ~DSIM_STATE_ENABLED;
1538 exynos_dsi_poweroff(dsi); 1476 pm_runtime_put_sync(dsi->dev);
1539 return; 1477 return;
1540 } 1478 }
1541 1479
@@ -1547,7 +1485,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
1547 dsi->state &= ~DSIM_STATE_ENABLED; 1485 dsi->state &= ~DSIM_STATE_ENABLED;
1548 exynos_dsi_set_display_enable(dsi, false); 1486 exynos_dsi_set_display_enable(dsi, false);
1549 drm_panel_unprepare(dsi->panel); 1487 drm_panel_unprepare(dsi->panel);
1550 exynos_dsi_poweroff(dsi); 1488 pm_runtime_put_sync(dsi->dev);
1551 return; 1489 return;
1552 } 1490 }
1553 1491
@@ -1569,7 +1507,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
1569 1507
1570 dsi->state &= ~DSIM_STATE_ENABLED; 1508 dsi->state &= ~DSIM_STATE_ENABLED;
1571 1509
1572 exynos_dsi_poweroff(dsi); 1510 pm_runtime_put_sync(dsi->dev);
1573} 1511}
1574 1512
1575static enum drm_connector_status 1513static enum drm_connector_status
@@ -1603,7 +1541,7 @@ static void exynos_dsi_connector_destroy(struct drm_connector *connector)
1603 connector->dev = NULL; 1541 connector->dev = NULL;
1604} 1542}
1605 1543
1606static struct drm_connector_funcs exynos_dsi_connector_funcs = { 1544static const struct drm_connector_funcs exynos_dsi_connector_funcs = {
1607 .dpms = drm_atomic_helper_connector_dpms, 1545 .dpms = drm_atomic_helper_connector_dpms,
1608 .detect = exynos_dsi_detect, 1546 .detect = exynos_dsi_detect,
1609 .fill_modes = drm_helper_probe_single_connector_modes, 1547 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1631,7 +1569,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
1631 return &dsi->encoder; 1569 return &dsi->encoder;
1632} 1570}
1633 1571
1634static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { 1572static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
1635 .get_modes = exynos_dsi_get_modes, 1573 .get_modes = exynos_dsi_get_modes,
1636 .best_encoder = exynos_dsi_best_encoder, 1574 .best_encoder = exynos_dsi_best_encoder,
1637}; 1575};
@@ -1684,14 +1622,14 @@ static void exynos_dsi_mode_set(struct drm_encoder *encoder,
1684 vm->hsync_len = m->hsync_end - m->hsync_start; 1622 vm->hsync_len = m->hsync_end - m->hsync_start;
1685} 1623}
1686 1624
1687static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { 1625static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
1688 .mode_fixup = exynos_dsi_mode_fixup, 1626 .mode_fixup = exynos_dsi_mode_fixup,
1689 .mode_set = exynos_dsi_mode_set, 1627 .mode_set = exynos_dsi_mode_set,
1690 .enable = exynos_dsi_enable, 1628 .enable = exynos_dsi_enable,
1691 .disable = exynos_dsi_disable, 1629 .disable = exynos_dsi_disable,
1692}; 1630};
1693 1631
1694static struct drm_encoder_funcs exynos_dsi_encoder_funcs = { 1632static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
1695 .destroy = drm_encoder_cleanup, 1633 .destroy = drm_encoder_cleanup,
1696}; 1634};
1697 1635
@@ -1797,13 +1735,13 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1797 1735
1798 ep = of_graph_get_next_endpoint(node, NULL); 1736 ep = of_graph_get_next_endpoint(node, NULL);
1799 if (!ep) { 1737 if (!ep) {
1800 ret = -ENXIO; 1738 ret = -EINVAL;
1801 goto end; 1739 goto end;
1802 } 1740 }
1803 1741
1804 dsi->bridge_node = of_graph_get_remote_port_parent(ep); 1742 dsi->bridge_node = of_graph_get_remote_port_parent(ep);
1805 if (!dsi->bridge_node) { 1743 if (!dsi->bridge_node) {
1806 ret = -ENXIO; 1744 ret = -EINVAL;
1807 goto end; 1745 goto end;
1808 } 1746 }
1809end: 1747end:
@@ -1831,7 +1769,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1831 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1769 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1832 1770
1833 drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs, 1771 drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
1834 DRM_MODE_ENCODER_TMDS); 1772 DRM_MODE_ENCODER_TMDS, NULL);
1835 1773
1836 drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs); 1774 drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
1837 1775
@@ -1954,22 +1892,99 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1954 1892
1955 platform_set_drvdata(pdev, &dsi->encoder); 1893 platform_set_drvdata(pdev, &dsi->encoder);
1956 1894
1895 pm_runtime_enable(dev);
1896
1957 return component_add(dev, &exynos_dsi_component_ops); 1897 return component_add(dev, &exynos_dsi_component_ops);
1958} 1898}
1959 1899
1960static int exynos_dsi_remove(struct platform_device *pdev) 1900static int exynos_dsi_remove(struct platform_device *pdev)
1961{ 1901{
1902 pm_runtime_disable(&pdev->dev);
1903
1962 component_del(&pdev->dev, &exynos_dsi_component_ops); 1904 component_del(&pdev->dev, &exynos_dsi_component_ops);
1963 1905
1964 return 0; 1906 return 0;
1965} 1907}
1966 1908
1909#ifdef CONFIG_PM
1910static int exynos_dsi_suspend(struct device *dev)
1911{
1912 struct drm_encoder *encoder = dev_get_drvdata(dev);
1913 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1914 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1915 int ret, i;
1916
1917 usleep_range(10000, 20000);
1918
1919 if (dsi->state & DSIM_STATE_INITIALIZED) {
1920 dsi->state &= ~DSIM_STATE_INITIALIZED;
1921
1922 exynos_dsi_disable_clock(dsi);
1923
1924 exynos_dsi_disable_irq(dsi);
1925 }
1926
1927 dsi->state &= ~DSIM_STATE_CMD_LPM;
1928
1929 phy_power_off(dsi->phy);
1930
1931 for (i = driver_data->num_clks - 1; i > -1; i--)
1932 clk_disable_unprepare(dsi->clks[i]);
1933
1934 ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1935 if (ret < 0)
1936 dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
1937
1938 return 0;
1939}
1940
1941static int exynos_dsi_resume(struct device *dev)
1942{
1943 struct drm_encoder *encoder = dev_get_drvdata(dev);
1944 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1945 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1946 int ret, i;
1947
1948 ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1949 if (ret < 0) {
1950 dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
1951 return ret;
1952 }
1953
1954 for (i = 0; i < driver_data->num_clks; i++) {
1955 ret = clk_prepare_enable(dsi->clks[i]);
1956 if (ret < 0)
1957 goto err_clk;
1958 }
1959
1960 ret = phy_power_on(dsi->phy);
1961 if (ret < 0) {
1962 dev_err(dsi->dev, "cannot enable phy %d\n", ret);
1963 goto err_clk;
1964 }
1965
1966 return 0;
1967
1968err_clk:
1969 while (--i > -1)
1970 clk_disable_unprepare(dsi->clks[i]);
1971 regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1972
1973 return ret;
1974}
1975#endif
1976
1977static const struct dev_pm_ops exynos_dsi_pm_ops = {
1978 SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
1979};
1980
1967struct platform_driver dsi_driver = { 1981struct platform_driver dsi_driver = {
1968 .probe = exynos_dsi_probe, 1982 .probe = exynos_dsi_probe,
1969 .remove = exynos_dsi_remove, 1983 .remove = exynos_dsi_remove,
1970 .driver = { 1984 .driver = {
1971 .name = "exynos-dsi", 1985 .name = "exynos-dsi",
1972 .owner = THIS_MODULE, 1986 .owner = THIS_MODULE,
1987 .pm = &exynos_dsi_pm_ops,
1973 .of_match_table = exynos_dsi_of_match, 1988 .of_match_table = exynos_dsi_of_match,
1974 }, 1989 },
1975}; 1990};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 49b9bc302e87..d614194644c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -37,6 +37,7 @@
37struct exynos_drm_fb { 37struct exynos_drm_fb {
38 struct drm_framebuffer fb; 38 struct drm_framebuffer fb;
39 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 39 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
40 dma_addr_t dma_addr[MAX_FB_BUFFER];
40}; 41};
41 42
42static int check_fb_gem_memory_type(struct drm_device *drm_dev, 43static int check_fb_gem_memory_type(struct drm_device *drm_dev,
@@ -70,9 +71,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
70 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 71 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
71 unsigned int i; 72 unsigned int i;
72 73
73 /* make sure that overlay data are updated before relesing fb. */
74 exynos_drm_crtc_complete_scanout(fb);
75
76 drm_framebuffer_cleanup(fb); 74 drm_framebuffer_cleanup(fb);
77 75
78 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { 76 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
@@ -109,7 +107,7 @@ static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
109 return 0; 107 return 0;
110} 108}
111 109
112static struct drm_framebuffer_funcs exynos_drm_fb_funcs = { 110static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
113 .destroy = exynos_drm_fb_destroy, 111 .destroy = exynos_drm_fb_destroy,
114 .create_handle = exynos_drm_fb_create_handle, 112 .create_handle = exynos_drm_fb_create_handle,
115 .dirty = exynos_drm_fb_dirty, 113 .dirty = exynos_drm_fb_dirty,
@@ -135,6 +133,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
135 goto err; 133 goto err;
136 134
137 exynos_fb->exynos_gem[i] = exynos_gem[i]; 135 exynos_fb->exynos_gem[i] = exynos_gem[i];
136 exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
137 + mode_cmd->offsets[i];
138 } 138 }
139 139
140 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 140 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
@@ -189,21 +189,14 @@ err:
189 return ERR_PTR(ret); 189 return ERR_PTR(ret);
190} 190}
191 191
192struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index) 192dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
193{ 193{
194 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 194 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
195 struct exynos_drm_gem *exynos_gem;
196 195
197 if (index >= MAX_FB_BUFFER) 196 if (index >= MAX_FB_BUFFER)
198 return NULL; 197 return DMA_ERROR_CODE;
199
200 exynos_gem = exynos_fb->exynos_gem[index];
201 if (!exynos_gem)
202 return NULL;
203
204 DRM_DEBUG_KMS("dma_addr: 0x%lx\n", (unsigned long)exynos_gem->dma_addr);
205 198
206 return exynos_gem; 199 return exynos_fb->dma_addr[index];
207} 200}
208 201
209static void exynos_drm_output_poll_changed(struct drm_device *dev) 202static void exynos_drm_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index a8a75ac87e59..4aae9dd2b0d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -22,8 +22,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
22 struct exynos_drm_gem **exynos_gem, 22 struct exynos_drm_gem **exynos_gem,
23 int count); 23 int count);
24 24
25/* get gem object of a drm framebuffer */ 25dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index);
26struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index);
27 26
28void exynos_drm_mode_config_init(struct drm_device *dev); 27void exynos_drm_mode_config_init(struct drm_device *dev);
29 28
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bd75c1531cac..70194d0e4fe4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -29,6 +29,7 @@
29#include <drm/exynos_drm.h> 29#include <drm/exynos_drm.h>
30 30
31#include "exynos_drm_drv.h" 31#include "exynos_drm_drv.h"
32#include "exynos_drm_fb.h"
32#include "exynos_drm_fbdev.h" 33#include "exynos_drm_fbdev.h"
33#include "exynos_drm_crtc.h" 34#include "exynos_drm_crtc.h"
34#include "exynos_drm_plane.h" 35#include "exynos_drm_plane.h"
@@ -87,7 +88,6 @@
87 88
88/* FIMD has totally five hardware windows. */ 89/* FIMD has totally five hardware windows. */
89#define WINDOWS_NR 5 90#define WINDOWS_NR 5
90#define CURSOR_WIN 4
91 91
92struct fimd_driver_data { 92struct fimd_driver_data {
93 unsigned int timing_base; 93 unsigned int timing_base;
@@ -150,6 +150,7 @@ struct fimd_context {
150 struct drm_device *drm_dev; 150 struct drm_device *drm_dev;
151 struct exynos_drm_crtc *crtc; 151 struct exynos_drm_crtc *crtc;
152 struct exynos_drm_plane planes[WINDOWS_NR]; 152 struct exynos_drm_plane planes[WINDOWS_NR];
153 struct exynos_drm_plane_config configs[WINDOWS_NR];
153 struct clk *bus_clk; 154 struct clk *bus_clk;
154 struct clk *lcd_clk; 155 struct clk *lcd_clk;
155 void __iomem *regs; 156 void __iomem *regs;
@@ -187,6 +188,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
187}; 188};
188MODULE_DEVICE_TABLE(of, fimd_driver_dt_match); 189MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
189 190
191static const enum drm_plane_type fimd_win_types[WINDOWS_NR] = {
192 DRM_PLANE_TYPE_PRIMARY,
193 DRM_PLANE_TYPE_OVERLAY,
194 DRM_PLANE_TYPE_OVERLAY,
195 DRM_PLANE_TYPE_OVERLAY,
196 DRM_PLANE_TYPE_CURSOR,
197};
198
190static const uint32_t fimd_formats[] = { 199static const uint32_t fimd_formats[] = {
191 DRM_FORMAT_C8, 200 DRM_FORMAT_C8,
192 DRM_FORMAT_XRGB1555, 201 DRM_FORMAT_XRGB1555,
@@ -478,7 +487,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
478 487
479 488
480static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, 489static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
481 struct drm_framebuffer *fb) 490 uint32_t pixel_format, int width)
482{ 491{
483 unsigned long val; 492 unsigned long val;
484 493
@@ -489,11 +498,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
489 * So the request format is ARGB8888 then change it to XRGB8888. 498 * So the request format is ARGB8888 then change it to XRGB8888.
490 */ 499 */
491 if (ctx->driver_data->has_limited_fmt && !win) { 500 if (ctx->driver_data->has_limited_fmt && !win) {
492 if (fb->pixel_format == DRM_FORMAT_ARGB8888) 501 if (pixel_format == DRM_FORMAT_ARGB8888)
493 fb->pixel_format = DRM_FORMAT_XRGB8888; 502 pixel_format = DRM_FORMAT_XRGB8888;
494 } 503 }
495 504
496 switch (fb->pixel_format) { 505 switch (pixel_format) {
497 case DRM_FORMAT_C8: 506 case DRM_FORMAT_C8:
498 val |= WINCON0_BPPMODE_8BPP_PALETTE; 507 val |= WINCON0_BPPMODE_8BPP_PALETTE;
499 val |= WINCONx_BURSTLEN_8WORD; 508 val |= WINCONx_BURSTLEN_8WORD;
@@ -529,17 +538,15 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
529 break; 538 break;
530 } 539 }
531 540
532 DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
533
534 /* 541 /*
535 * In case of exynos, setting dma-burst to 16Word causes permanent 542 * Setting dma-burst to 16Word causes permanent tearing for very small
536 * tearing for very small buffers, e.g. cursor buffer. Burst Mode 543 * buffers, e.g. cursor buffer. Burst Mode switching which based on
537 * switching which is based on plane size is not recommended as 544 * plane size is not recommended as plane size varies alot towards the
538 * plane size varies alot towards the end of the screen and rapid 545 * end of the screen and rapid movement causes unstable DMA, but it is
539 * movement causes unstable DMA which results into iommu crash/tear. 546 * still better to change dma-burst than displaying garbage.
540 */ 547 */
541 548
542 if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) { 549 if (width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
543 val &= ~WINCONx_BURSTLEN_MASK; 550 val &= ~WINCONx_BURSTLEN_MASK;
544 val |= WINCONx_BURSTLEN_4WORD; 551 val |= WINCONx_BURSTLEN_4WORD;
545 } 552 }
@@ -615,64 +622,68 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
615 writel(val, ctx->regs + reg); 622 writel(val, ctx->regs + reg);
616} 623}
617 624
618static void fimd_atomic_begin(struct exynos_drm_crtc *crtc, 625static void fimd_atomic_begin(struct exynos_drm_crtc *crtc)
619 struct exynos_drm_plane *plane)
620{ 626{
621 struct fimd_context *ctx = crtc->ctx; 627 struct fimd_context *ctx = crtc->ctx;
628 int i;
622 629
623 if (ctx->suspended) 630 if (ctx->suspended)
624 return; 631 return;
625 632
626 fimd_shadow_protect_win(ctx, plane->zpos, true); 633 for (i = 0; i < WINDOWS_NR; i++)
634 fimd_shadow_protect_win(ctx, i, true);
627} 635}
628 636
629static void fimd_atomic_flush(struct exynos_drm_crtc *crtc, 637static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
630 struct exynos_drm_plane *plane)
631{ 638{
632 struct fimd_context *ctx = crtc->ctx; 639 struct fimd_context *ctx = crtc->ctx;
640 int i;
633 641
634 if (ctx->suspended) 642 if (ctx->suspended)
635 return; 643 return;
636 644
637 fimd_shadow_protect_win(ctx, plane->zpos, false); 645 for (i = 0; i < WINDOWS_NR; i++)
646 fimd_shadow_protect_win(ctx, i, false);
638} 647}
639 648
640static void fimd_update_plane(struct exynos_drm_crtc *crtc, 649static void fimd_update_plane(struct exynos_drm_crtc *crtc,
641 struct exynos_drm_plane *plane) 650 struct exynos_drm_plane *plane)
642{ 651{
652 struct exynos_drm_plane_state *state =
653 to_exynos_plane_state(plane->base.state);
643 struct fimd_context *ctx = crtc->ctx; 654 struct fimd_context *ctx = crtc->ctx;
644 struct drm_plane_state *state = plane->base.state; 655 struct drm_framebuffer *fb = state->base.fb;
645 dma_addr_t dma_addr; 656 dma_addr_t dma_addr;
646 unsigned long val, size, offset; 657 unsigned long val, size, offset;
647 unsigned int last_x, last_y, buf_offsize, line_size; 658 unsigned int last_x, last_y, buf_offsize, line_size;
648 unsigned int win = plane->zpos; 659 unsigned int win = plane->index;
649 unsigned int bpp = state->fb->bits_per_pixel >> 3; 660 unsigned int bpp = fb->bits_per_pixel >> 3;
650 unsigned int pitch = state->fb->pitches[0]; 661 unsigned int pitch = fb->pitches[0];
651 662
652 if (ctx->suspended) 663 if (ctx->suspended)
653 return; 664 return;
654 665
655 offset = plane->src_x * bpp; 666 offset = state->src.x * bpp;
656 offset += plane->src_y * pitch; 667 offset += state->src.y * pitch;
657 668
658 /* buffer start address */ 669 /* buffer start address */
659 dma_addr = plane->dma_addr[0] + offset; 670 dma_addr = exynos_drm_fb_dma_addr(fb, 0) + offset;
660 val = (unsigned long)dma_addr; 671 val = (unsigned long)dma_addr;
661 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 672 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
662 673
663 /* buffer end address */ 674 /* buffer end address */
664 size = pitch * plane->crtc_h; 675 size = pitch * state->crtc.h;
665 val = (unsigned long)(dma_addr + size); 676 val = (unsigned long)(dma_addr + size);
666 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 677 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
667 678
668 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", 679 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
669 (unsigned long)dma_addr, val, size); 680 (unsigned long)dma_addr, val, size);
670 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 681 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
671 plane->crtc_w, plane->crtc_h); 682 state->crtc.w, state->crtc.h);
672 683
673 /* buffer size */ 684 /* buffer size */
674 buf_offsize = pitch - (plane->crtc_w * bpp); 685 buf_offsize = pitch - (state->crtc.w * bpp);
675 line_size = plane->crtc_w * bpp; 686 line_size = state->crtc.w * bpp;
676 val = VIDW_BUF_SIZE_OFFSET(buf_offsize) | 687 val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
677 VIDW_BUF_SIZE_PAGEWIDTH(line_size) | 688 VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
678 VIDW_BUF_SIZE_OFFSET_E(buf_offsize) | 689 VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -680,16 +691,16 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
680 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); 691 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
681 692
682 /* OSD position */ 693 /* OSD position */
683 val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | 694 val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
684 VIDOSDxA_TOPLEFT_Y(plane->crtc_y) | 695 VIDOSDxA_TOPLEFT_Y(state->crtc.y) |
685 VIDOSDxA_TOPLEFT_X_E(plane->crtc_x) | 696 VIDOSDxA_TOPLEFT_X_E(state->crtc.x) |
686 VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y); 697 VIDOSDxA_TOPLEFT_Y_E(state->crtc.y);
687 writel(val, ctx->regs + VIDOSD_A(win)); 698 writel(val, ctx->regs + VIDOSD_A(win));
688 699
689 last_x = plane->crtc_x + plane->crtc_w; 700 last_x = state->crtc.x + state->crtc.w;
690 if (last_x) 701 if (last_x)
691 last_x--; 702 last_x--;
692 last_y = plane->crtc_y + plane->crtc_h; 703 last_y = state->crtc.y + state->crtc.h;
693 if (last_y) 704 if (last_y)
694 last_y--; 705 last_y--;
695 706
@@ -699,20 +710,20 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
699 writel(val, ctx->regs + VIDOSD_B(win)); 710 writel(val, ctx->regs + VIDOSD_B(win));
700 711
701 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", 712 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
702 plane->crtc_x, plane->crtc_y, last_x, last_y); 713 state->crtc.x, state->crtc.y, last_x, last_y);
703 714
704 /* OSD size */ 715 /* OSD size */
705 if (win != 3 && win != 4) { 716 if (win != 3 && win != 4) {
706 u32 offset = VIDOSD_D(win); 717 u32 offset = VIDOSD_D(win);
707 if (win == 0) 718 if (win == 0)
708 offset = VIDOSD_C(win); 719 offset = VIDOSD_C(win);
709 val = plane->crtc_w * plane->crtc_h; 720 val = state->crtc.w * state->crtc.h;
710 writel(val, ctx->regs + offset); 721 writel(val, ctx->regs + offset);
711 722
712 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); 723 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
713 } 724 }
714 725
715 fimd_win_set_pixfmt(ctx, win, state->fb); 726 fimd_win_set_pixfmt(ctx, win, fb->pixel_format, state->src.w);
716 727
717 /* hardware window 0 doesn't support color key. */ 728 /* hardware window 0 doesn't support color key. */
718 if (win != 0) 729 if (win != 0)
@@ -731,7 +742,7 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
731 struct exynos_drm_plane *plane) 742 struct exynos_drm_plane *plane)
732{ 743{
733 struct fimd_context *ctx = crtc->ctx; 744 struct fimd_context *ctx = crtc->ctx;
734 unsigned int win = plane->zpos; 745 unsigned int win = plane->index;
735 746
736 if (ctx->suspended) 747 if (ctx->suspended)
737 return; 748 return;
@@ -745,7 +756,6 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
745static void fimd_enable(struct exynos_drm_crtc *crtc) 756static void fimd_enable(struct exynos_drm_crtc *crtc)
746{ 757{
747 struct fimd_context *ctx = crtc->ctx; 758 struct fimd_context *ctx = crtc->ctx;
748 int ret;
749 759
750 if (!ctx->suspended) 760 if (!ctx->suspended)
751 return; 761 return;
@@ -754,18 +764,6 @@ static void fimd_enable(struct exynos_drm_crtc *crtc)
754 764
755 pm_runtime_get_sync(ctx->dev); 765 pm_runtime_get_sync(ctx->dev);
756 766
757 ret = clk_prepare_enable(ctx->bus_clk);
758 if (ret < 0) {
759 DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
760 return;
761 }
762
763 ret = clk_prepare_enable(ctx->lcd_clk);
764 if (ret < 0) {
765 DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
766 return;
767 }
768
769 /* if vblank was enabled status, enable it again. */ 767 /* if vblank was enabled status, enable it again. */
770 if (test_and_clear_bit(0, &ctx->irq_flags)) 768 if (test_and_clear_bit(0, &ctx->irq_flags))
771 fimd_enable_vblank(ctx->crtc); 769 fimd_enable_vblank(ctx->crtc);
@@ -795,11 +793,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
795 793
796 writel(0, ctx->regs + VIDCON0); 794 writel(0, ctx->regs + VIDCON0);
797 795
798 clk_disable_unprepare(ctx->lcd_clk);
799 clk_disable_unprepare(ctx->bus_clk);
800
801 pm_runtime_put_sync(ctx->dev); 796 pm_runtime_put_sync(ctx->dev);
802
803 ctx->suspended = true; 797 ctx->suspended = true;
804} 798}
805 799
@@ -941,18 +935,19 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
941 struct drm_device *drm_dev = data; 935 struct drm_device *drm_dev = data;
942 struct exynos_drm_private *priv = drm_dev->dev_private; 936 struct exynos_drm_private *priv = drm_dev->dev_private;
943 struct exynos_drm_plane *exynos_plane; 937 struct exynos_drm_plane *exynos_plane;
944 enum drm_plane_type type; 938 unsigned int i;
945 unsigned int zpos;
946 int ret; 939 int ret;
947 940
948 ctx->drm_dev = drm_dev; 941 ctx->drm_dev = drm_dev;
949 ctx->pipe = priv->pipe++; 942 ctx->pipe = priv->pipe++;
950 943
951 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 944 for (i = 0; i < WINDOWS_NR; i++) {
952 type = exynos_plane_get_type(zpos, CURSOR_WIN); 945 ctx->configs[i].pixel_formats = fimd_formats;
953 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 946 ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
954 1 << ctx->pipe, type, fimd_formats, 947 ctx->configs[i].zpos = i;
955 ARRAY_SIZE(fimd_formats), zpos); 948 ctx->configs[i].type = fimd_win_types[i];
949 ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
950 1 << ctx->pipe, &ctx->configs[i]);
956 if (ret) 951 if (ret)
957 return ret; 952 return ret;
958 } 953 }
@@ -1121,12 +1116,49 @@ static int fimd_remove(struct platform_device *pdev)
1121 return 0; 1116 return 0;
1122} 1117}
1123 1118
1119#ifdef CONFIG_PM
1120static int exynos_fimd_suspend(struct device *dev)
1121{
1122 struct fimd_context *ctx = dev_get_drvdata(dev);
1123
1124 clk_disable_unprepare(ctx->lcd_clk);
1125 clk_disable_unprepare(ctx->bus_clk);
1126
1127 return 0;
1128}
1129
1130static int exynos_fimd_resume(struct device *dev)
1131{
1132 struct fimd_context *ctx = dev_get_drvdata(dev);
1133 int ret;
1134
1135 ret = clk_prepare_enable(ctx->bus_clk);
1136 if (ret < 0) {
1137 DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
1138 return ret;
1139 }
1140
1141 ret = clk_prepare_enable(ctx->lcd_clk);
1142 if (ret < 0) {
1143 DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
1144 return ret;
1145 }
1146
1147 return 0;
1148}
1149#endif
1150
1151static const struct dev_pm_ops exynos_fimd_pm_ops = {
1152 SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
1153};
1154
1124struct platform_driver fimd_driver = { 1155struct platform_driver fimd_driver = {
1125 .probe = fimd_probe, 1156 .probe = fimd_probe,
1126 .remove = fimd_remove, 1157 .remove = fimd_remove,
1127 .driver = { 1158 .driver = {
1128 .name = "exynos4-fb", 1159 .name = "exynos4-fb",
1129 .owner = THIS_MODULE, 1160 .owner = THIS_MODULE,
1161 .pm = &exynos_fimd_pm_ops,
1130 .of_match_table = fimd_driver_dt_match, 1162 .of_match_table = fimd_driver_dt_match,
1131 }, 1163 },
1132}; 1164};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 37ab8b282db6..9ca5047959ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -55,8 +55,6 @@ struct exynos_drm_gem {
55 struct sg_table *sgt; 55 struct sg_table *sgt;
56}; 56};
57 57
58struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
59
60/* destroy a buffer with gem object */ 58/* destroy a buffer with gem object */
61void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem); 59void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
62 60
@@ -91,10 +89,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
91 unsigned int gem_handle, 89 unsigned int gem_handle,
92 struct drm_file *filp); 90 struct drm_file *filp);
93 91
94/* map user space allocated by malloc to pages. */
95int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
96 struct drm_file *file_priv);
97
98/* get buffer information to memory region allocated by gem. */ 92/* get buffer information to memory region allocated by gem. */
99int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 93int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
100 struct drm_file *file_priv); 94 struct drm_file *file_priv);
@@ -123,28 +117,6 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
123/* set vm_flags and we can change the vm attribute to other one at here. */ 117/* set vm_flags and we can change the vm attribute to other one at here. */
124int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 118int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
125 119
126static inline int vma_is_io(struct vm_area_struct *vma)
127{
128 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
129}
130
131/* get a copy of a virtual memory region. */
132struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
133
134/* release a userspace virtual memory area. */
135void exynos_gem_put_vma(struct vm_area_struct *vma);
136
137/* get pages from user space. */
138int exynos_gem_get_pages_from_userptr(unsigned long start,
139 unsigned int npages,
140 struct page **pages,
141 struct vm_area_struct *vma);
142
143/* drop the reference to pages. */
144void exynos_gem_put_pages_to_userptr(struct page **pages,
145 unsigned int npages,
146 struct vm_area_struct *vma);
147
148/* map sgt with dma region. */ 120/* map sgt with dma region. */
149int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, 121int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
150 struct sg_table *sgt, 122 struct sg_table *sgt,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 11b87d2a7913..7aecd23cfa11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -15,7 +15,8 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <plat/map-base.h> 18#include <linux/mfd/syscon.h>
19#include <linux/regmap.h>
19 20
20#include <drm/drmP.h> 21#include <drm/drmP.h>
21#include <drm/exynos_drm.h> 22#include <drm/exynos_drm.h>
@@ -126,6 +127,7 @@ struct gsc_capability {
126 * @ippdrv: prepare initialization using ippdrv. 127 * @ippdrv: prepare initialization using ippdrv.
127 * @regs_res: register resources. 128 * @regs_res: register resources.
128 * @regs: memory mapped io registers. 129 * @regs: memory mapped io registers.
130 * @sysreg: handle to SYSREG block regmap.
129 * @lock: locking of operations. 131 * @lock: locking of operations.
130 * @gsc_clk: gsc gate clock. 132 * @gsc_clk: gsc gate clock.
131 * @sc: scaler infomations. 133 * @sc: scaler infomations.
@@ -138,6 +140,7 @@ struct gsc_context {
138 struct exynos_drm_ippdrv ippdrv; 140 struct exynos_drm_ippdrv ippdrv;
139 struct resource *regs_res; 141 struct resource *regs_res;
140 void __iomem *regs; 142 void __iomem *regs;
143 struct regmap *sysreg;
141 struct mutex lock; 144 struct mutex lock;
142 struct clk *gsc_clk; 145 struct clk *gsc_clk;
143 struct gsc_scaler sc; 146 struct gsc_scaler sc;
@@ -437,9 +440,12 @@ static int gsc_sw_reset(struct gsc_context *ctx)
437 440
438static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) 441static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
439{ 442{
440 u32 gscblk_cfg; 443 unsigned int gscblk_cfg;
441 444
442 gscblk_cfg = readl(SYSREG_GSCBLK_CFG1); 445 if (!ctx->sysreg)
446 return;
447
448 regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg);
443 449
444 if (enable) 450 if (enable)
445 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) | 451 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
@@ -448,7 +454,7 @@ static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
448 else 454 else
449 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id); 455 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
450 456
451 writel(gscblk_cfg, SYSREG_GSCBLK_CFG1); 457 regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg);
452} 458}
453 459
454static void gsc_handle_irq(struct gsc_context *ctx, bool enable, 460static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
@@ -1215,10 +1221,10 @@ static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1215 DRM_DEBUG_KMS("enable[%d]\n", enable); 1221 DRM_DEBUG_KMS("enable[%d]\n", enable);
1216 1222
1217 if (enable) { 1223 if (enable) {
1218 clk_enable(ctx->gsc_clk); 1224 clk_prepare_enable(ctx->gsc_clk);
1219 ctx->suspended = false; 1225 ctx->suspended = false;
1220 } else { 1226 } else {
1221 clk_disable(ctx->gsc_clk); 1227 clk_disable_unprepare(ctx->gsc_clk);
1222 ctx->suspended = true; 1228 ctx->suspended = true;
1223 } 1229 }
1224 1230
@@ -1663,6 +1669,15 @@ static int gsc_probe(struct platform_device *pdev)
1663 if (!ctx) 1669 if (!ctx)
1664 return -ENOMEM; 1670 return -ENOMEM;
1665 1671
1672 if (dev->of_node) {
1673 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
1674 "samsung,sysreg");
1675 if (IS_ERR(ctx->sysreg)) {
1676 dev_warn(dev, "failed to get system register.\n");
1677 ctx->sysreg = NULL;
1678 }
1679 }
1680
1666 /* clock control */ 1681 /* clock control */
1667 ctx->gsc_clk = devm_clk_get(dev, "gscl"); 1682 ctx->gsc_clk = devm_clk_get(dev, "gscl");
1668 if (IS_ERR(ctx->gsc_clk)) { 1683 if (IS_ERR(ctx->gsc_clk)) {
@@ -1713,7 +1728,6 @@ static int gsc_probe(struct platform_device *pdev)
1713 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1714 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
1715 1730
1716 pm_runtime_set_active(dev);
1717 pm_runtime_enable(dev); 1731 pm_runtime_enable(dev);
1718 1732
1719 ret = exynos_drm_ippdrv_register(ippdrv); 1733 ret = exynos_drm_ippdrv_register(ippdrv);
@@ -1797,6 +1811,12 @@ static const struct dev_pm_ops gsc_pm_ops = {
1797 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) 1811 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1798}; 1812};
1799 1813
1814static const struct of_device_id exynos_drm_gsc_of_match[] = {
1815 { .compatible = "samsung,exynos5-gsc" },
1816 { },
1817};
1818MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
1819
1800struct platform_driver gsc_driver = { 1820struct platform_driver gsc_driver = {
1801 .probe = gsc_probe, 1821 .probe = gsc_probe,
1802 .remove = gsc_remove, 1822 .remove = gsc_remove,
@@ -1804,6 +1824,7 @@ struct platform_driver gsc_driver = {
1804 .name = "exynos-drm-gsc", 1824 .name = "exynos-drm-gsc",
1805 .owner = THIS_MODULE, 1825 .owner = THIS_MODULE,
1806 .pm = &gsc_pm_ops, 1826 .pm = &gsc_pm_ops,
1827 .of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
1807 }, 1828 },
1808}; 1829};
1809 1830
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 8994eab56ba8..4eaef36aec5a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -389,7 +389,7 @@ already_disabled:
389 mutex_unlock(&mic_mutex); 389 mutex_unlock(&mic_mutex);
390} 390}
391 391
392struct drm_bridge_funcs mic_bridge_funcs = { 392static const struct drm_bridge_funcs mic_bridge_funcs = {
393 .disable = mic_disable, 393 .disable = mic_disable,
394 .post_disable = mic_post_disable, 394 .post_disable = mic_post_disable,
395 .pre_enable = mic_pre_enable, 395 .pre_enable = mic_pre_enable,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 179311760bb7..d86227236f55 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -56,93 +56,213 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
56 return size; 56 return size;
57} 57}
58 58
59static void exynos_plane_mode_set(struct drm_plane *plane, 59static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
60 struct drm_crtc *crtc, 60
61 struct drm_framebuffer *fb,
62 int crtc_x, int crtc_y,
63 unsigned int crtc_w, unsigned int crtc_h,
64 uint32_t src_x, uint32_t src_y,
65 uint32_t src_w, uint32_t src_h)
66{ 61{
67 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 62 struct drm_plane_state *state = &exynos_state->base;
63 struct drm_crtc *crtc = exynos_state->base.crtc;
68 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 64 struct drm_display_mode *mode = &crtc->state->adjusted_mode;
65 int crtc_x, crtc_y;
66 unsigned int crtc_w, crtc_h;
67 unsigned int src_x, src_y;
68 unsigned int src_w, src_h;
69 unsigned int actual_w; 69 unsigned int actual_w;
70 unsigned int actual_h; 70 unsigned int actual_h;
71 71
72 /*
73 * The original src/dest coordinates are stored in exynos_state->base,
74 * but we want to keep another copy internal to our driver that we can
75 * clip/modify ourselves.
76 */
77
78 crtc_x = state->crtc_x;
79 crtc_y = state->crtc_y;
80 crtc_w = state->crtc_w;
81 crtc_h = state->crtc_h;
82
83 src_x = state->src_x >> 16;
84 src_y = state->src_y >> 16;
85 src_w = state->src_w >> 16;
86 src_h = state->src_h >> 16;
87
88 /* set ratio */
89 exynos_state->h_ratio = (src_w << 16) / crtc_w;
90 exynos_state->v_ratio = (src_h << 16) / crtc_h;
91
92 /* clip to visible area */
72 actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay); 93 actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay);
73 actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay); 94 actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay);
74 95
75 if (crtc_x < 0) { 96 if (crtc_x < 0) {
76 if (actual_w) 97 if (actual_w)
77 src_x -= crtc_x; 98 src_x += ((-crtc_x) * exynos_state->h_ratio) >> 16;
78 crtc_x = 0; 99 crtc_x = 0;
79 } 100 }
80 101
81 if (crtc_y < 0) { 102 if (crtc_y < 0) {
82 if (actual_h) 103 if (actual_h)
83 src_y -= crtc_y; 104 src_y += ((-crtc_y) * exynos_state->v_ratio) >> 16;
84 crtc_y = 0; 105 crtc_y = 0;
85 } 106 }
86 107
87 /* set ratio */
88 exynos_plane->h_ratio = (src_w << 16) / crtc_w;
89 exynos_plane->v_ratio = (src_h << 16) / crtc_h;
90
91 /* set drm framebuffer data. */ 108 /* set drm framebuffer data. */
92 exynos_plane->src_x = src_x; 109 exynos_state->src.x = src_x;
93 exynos_plane->src_y = src_y; 110 exynos_state->src.y = src_y;
94 exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16; 111 exynos_state->src.w = (actual_w * exynos_state->h_ratio) >> 16;
95 exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16; 112 exynos_state->src.h = (actual_h * exynos_state->v_ratio) >> 16;
96 113
97 /* set plane range to be displayed. */ 114 /* set plane range to be displayed. */
98 exynos_plane->crtc_x = crtc_x; 115 exynos_state->crtc.x = crtc_x;
99 exynos_plane->crtc_y = crtc_y; 116 exynos_state->crtc.y = crtc_y;
100 exynos_plane->crtc_w = actual_w; 117 exynos_state->crtc.w = actual_w;
101 exynos_plane->crtc_h = actual_h; 118 exynos_state->crtc.h = actual_h;
102 119
103 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)", 120 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
104 exynos_plane->crtc_x, exynos_plane->crtc_y, 121 exynos_state->crtc.x, exynos_state->crtc.y,
105 exynos_plane->crtc_w, exynos_plane->crtc_h); 122 exynos_state->crtc.w, exynos_state->crtc.h);
123}
124
125static void exynos_drm_plane_reset(struct drm_plane *plane)
126{
127 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
128 struct exynos_drm_plane_state *exynos_state;
129
130 if (plane->state) {
131 exynos_state = to_exynos_plane_state(plane->state);
132 if (exynos_state->base.fb)
133 drm_framebuffer_unreference(exynos_state->base.fb);
134 kfree(exynos_state);
135 plane->state = NULL;
136 }
137
138 exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
139 if (exynos_state) {
140 exynos_state->zpos = exynos_plane->config->zpos;
141 plane->state = &exynos_state->base;
142 plane->state->plane = plane;
143 }
144}
145
146static struct drm_plane_state *
147exynos_drm_plane_duplicate_state(struct drm_plane *plane)
148{
149 struct exynos_drm_plane_state *exynos_state;
150 struct exynos_drm_plane_state *copy;
151
152 exynos_state = to_exynos_plane_state(plane->state);
153 copy = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
154 if (!copy)
155 return NULL;
156
157 __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
158 copy->zpos = exynos_state->zpos;
159 return &copy->base;
160}
161
162static void exynos_drm_plane_destroy_state(struct drm_plane *plane,
163 struct drm_plane_state *old_state)
164{
165 struct exynos_drm_plane_state *old_exynos_state =
166 to_exynos_plane_state(old_state);
167 __drm_atomic_helper_plane_destroy_state(plane, old_state);
168 kfree(old_exynos_state);
169}
170
171static int exynos_drm_plane_atomic_set_property(struct drm_plane *plane,
172 struct drm_plane_state *state,
173 struct drm_property *property,
174 uint64_t val)
175{
176 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
177 struct exynos_drm_plane_state *exynos_state =
178 to_exynos_plane_state(state);
179 struct exynos_drm_private *dev_priv = plane->dev->dev_private;
180 const struct exynos_drm_plane_config *config = exynos_plane->config;
106 181
107 plane->crtc = crtc; 182 if (property == dev_priv->plane_zpos_property &&
183 (config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS))
184 exynos_state->zpos = val;
185 else
186 return -EINVAL;
187
188 return 0;
189}
190
191static int exynos_drm_plane_atomic_get_property(struct drm_plane *plane,
192 const struct drm_plane_state *state,
193 struct drm_property *property,
194 uint64_t *val)
195{
196 const struct exynos_drm_plane_state *exynos_state =
197 container_of(state, const struct exynos_drm_plane_state, base);
198 struct exynos_drm_private *dev_priv = plane->dev->dev_private;
199
200 if (property == dev_priv->plane_zpos_property)
201 *val = exynos_state->zpos;
202 else
203 return -EINVAL;
204
205 return 0;
108} 206}
109 207
110static struct drm_plane_funcs exynos_plane_funcs = { 208static struct drm_plane_funcs exynos_plane_funcs = {
111 .update_plane = drm_atomic_helper_update_plane, 209 .update_plane = drm_atomic_helper_update_plane,
112 .disable_plane = drm_atomic_helper_disable_plane, 210 .disable_plane = drm_atomic_helper_disable_plane,
113 .destroy = drm_plane_cleanup, 211 .destroy = drm_plane_cleanup,
114 .reset = drm_atomic_helper_plane_reset, 212 .set_property = drm_atomic_helper_plane_set_property,
115 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 213 .reset = exynos_drm_plane_reset,
116 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 214 .atomic_duplicate_state = exynos_drm_plane_duplicate_state,
215 .atomic_destroy_state = exynos_drm_plane_destroy_state,
216 .atomic_set_property = exynos_drm_plane_atomic_set_property,
217 .atomic_get_property = exynos_drm_plane_atomic_get_property,
117}; 218};
118 219
220static int
221exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
222 struct exynos_drm_plane_state *state)
223{
224 bool width_ok = false, height_ok = false;
225
226 if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE)
227 return 0;
228
229 if (state->src.w == state->crtc.w)
230 width_ok = true;
231
232 if (state->src.h == state->crtc.h)
233 height_ok = true;
234
235 if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
236 state->h_ratio == (1 << 15))
237 width_ok = true;
238
239 if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
240 state->v_ratio == (1 << 15))
241 height_ok = true;
242
243 if (width_ok & height_ok)
244 return 0;
245
246 DRM_DEBUG_KMS("scaling mode is not supported");
247 return -ENOTSUPP;
248}
249
119static int exynos_plane_atomic_check(struct drm_plane *plane, 250static int exynos_plane_atomic_check(struct drm_plane *plane,
120 struct drm_plane_state *state) 251 struct drm_plane_state *state)
121{ 252{
122 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 253 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
123 int nr; 254 struct exynos_drm_plane_state *exynos_state =
124 int i; 255 to_exynos_plane_state(state);
256 int ret = 0;
125 257
126 if (!state->fb) 258 if (!state->crtc || !state->fb)
127 return 0; 259 return 0;
128 260
129 nr = drm_format_num_planes(state->fb->pixel_format); 261 /* translate state into exynos_state */
130 for (i = 0; i < nr; i++) { 262 exynos_plane_mode_set(exynos_state);
131 struct exynos_drm_gem *exynos_gem =
132 exynos_drm_fb_gem(state->fb, i);
133 if (!exynos_gem) {
134 DRM_DEBUG_KMS("gem object is null\n");
135 return -EFAULT;
136 }
137
138 exynos_plane->dma_addr[i] = exynos_gem->dma_addr +
139 state->fb->offsets[i];
140 263
141 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", 264 ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
142 i, (unsigned long)exynos_plane->dma_addr[i]); 265 return ret;
143 }
144
145 return 0;
146} 266}
147 267
148static void exynos_plane_atomic_update(struct drm_plane *plane, 268static void exynos_plane_atomic_update(struct drm_plane *plane,
@@ -155,12 +275,7 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
155 if (!state->crtc) 275 if (!state->crtc)
156 return; 276 return;
157 277
158 exynos_plane_mode_set(plane, state->crtc, state->fb, 278 plane->crtc = state->crtc;
159 state->crtc_x, state->crtc_y,
160 state->crtc_w, state->crtc_h,
161 state->src_x >> 16, state->src_y >> 16,
162 state->src_w >> 16, state->src_h >> 16);
163
164 exynos_plane->pending_fb = state->fb; 279 exynos_plane->pending_fb = state->fb;
165 280
166 if (exynos_crtc->ops->update_plane) 281 if (exynos_crtc->ops->update_plane)
@@ -177,8 +292,7 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane,
177 return; 292 return;
178 293
179 if (exynos_crtc->ops->disable_plane) 294 if (exynos_crtc->ops->disable_plane)
180 exynos_crtc->ops->disable_plane(exynos_crtc, 295 exynos_crtc->ops->disable_plane(exynos_crtc, exynos_plane);
181 exynos_plane);
182} 296}
183 297
184static const struct drm_plane_helper_funcs plane_helper_funcs = { 298static const struct drm_plane_helper_funcs plane_helper_funcs = {
@@ -196,8 +310,8 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
196 310
197 prop = dev_priv->plane_zpos_property; 311 prop = dev_priv->plane_zpos_property;
198 if (!prop) { 312 if (!prop) {
199 prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, 313 prop = drm_property_create_range(dev, 0, "zpos",
200 "zpos", 0, MAX_PLANE - 1); 314 0, MAX_PLANE - 1);
201 if (!prop) 315 if (!prop)
202 return; 316 return;
203 317
@@ -207,28 +321,19 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
207 drm_object_attach_property(&plane->base, prop, zpos); 321 drm_object_attach_property(&plane->base, prop, zpos);
208} 322}
209 323
210enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
211 unsigned int cursor_win)
212{
213 if (zpos == DEFAULT_WIN)
214 return DRM_PLANE_TYPE_PRIMARY;
215 else if (zpos == cursor_win)
216 return DRM_PLANE_TYPE_CURSOR;
217 else
218 return DRM_PLANE_TYPE_OVERLAY;
219}
220
221int exynos_plane_init(struct drm_device *dev, 324int exynos_plane_init(struct drm_device *dev,
222 struct exynos_drm_plane *exynos_plane, 325 struct exynos_drm_plane *exynos_plane,
223 unsigned long possible_crtcs, enum drm_plane_type type, 326 unsigned int index, unsigned long possible_crtcs,
224 const uint32_t *formats, unsigned int fcount, 327 const struct exynos_drm_plane_config *config)
225 unsigned int zpos)
226{ 328{
227 int err; 329 int err;
228 330
229 err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs, 331 err = drm_universal_plane_init(dev, &exynos_plane->base,
230 &exynos_plane_funcs, formats, fcount, 332 possible_crtcs,
231 type); 333 &exynos_plane_funcs,
334 config->pixel_formats,
335 config->num_pixel_formats,
336 config->type, NULL);
232 if (err) { 337 if (err) {
233 DRM_ERROR("failed to initialize plane\n"); 338 DRM_ERROR("failed to initialize plane\n");
234 return err; 339 return err;
@@ -236,10 +341,10 @@ int exynos_plane_init(struct drm_device *dev,
236 341
237 drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs); 342 drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs);
238 343
239 exynos_plane->zpos = zpos; 344 exynos_plane->index = index;
345 exynos_plane->config = config;
240 346
241 if (type == DRM_PLANE_TYPE_OVERLAY) 347 exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos);
242 exynos_plane_attach_zpos_property(&exynos_plane->base, zpos);
243 348
244 return 0; 349 return 0;
245} 350}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index abb641e64c23..9aafad164cdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,10 +9,7 @@
9 * 9 *
10 */ 10 */
11 11
12enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
13 unsigned int cursor_win);
14int exynos_plane_init(struct drm_device *dev, 12int exynos_plane_init(struct drm_device *dev,
15 struct exynos_drm_plane *exynos_plane, 13 struct exynos_drm_plane *exynos_plane, unsigned int index,
16 unsigned long possible_crtcs, enum drm_plane_type type, 14 unsigned long possible_crtcs,
17 const uint32_t *formats, unsigned int fcount, 15 const struct exynos_drm_plane_config *config);
18 unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 2f5c118f4c8e..bea0f7826d30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -790,10 +790,10 @@ static int rotator_remove(struct platform_device *pdev)
790static int rotator_clk_crtl(struct rot_context *rot, bool enable) 790static int rotator_clk_crtl(struct rot_context *rot, bool enable)
791{ 791{
792 if (enable) { 792 if (enable) {
793 clk_enable(rot->clock); 793 clk_prepare_enable(rot->clock);
794 rot->suspended = false; 794 rot->suspended = false;
795 } else { 795 } else {
796 clk_disable(rot->clock); 796 clk_disable_unprepare(rot->clock);
797 rot->suspended = true; 797 rot->suspended = true;
798 } 798 }
799 799
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 669362c53f49..62ac4e5fa51d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -24,12 +24,12 @@
24 24
25#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
26#include "exynos_drm_crtc.h" 26#include "exynos_drm_crtc.h"
27#include "exynos_drm_fb.h"
27#include "exynos_drm_plane.h" 28#include "exynos_drm_plane.h"
28#include "exynos_drm_vidi.h" 29#include "exynos_drm_vidi.h"
29 30
30/* vidi has totally three virtual windows. */ 31/* vidi has totally three virtual windows. */
31#define WINDOWS_NR 3 32#define WINDOWS_NR 3
32#define CURSOR_WIN 2
33 33
34#define ctx_from_connector(c) container_of(c, struct vidi_context, \ 34#define ctx_from_connector(c) container_of(c, struct vidi_context, \
35 connector) 35 connector)
@@ -89,6 +89,12 @@ static const uint32_t formats[] = {
89 DRM_FORMAT_NV12, 89 DRM_FORMAT_NV12,
90}; 90};
91 91
92static const enum drm_plane_type vidi_win_types[WINDOWS_NR] = {
93 DRM_PLANE_TYPE_PRIMARY,
94 DRM_PLANE_TYPE_OVERLAY,
95 DRM_PLANE_TYPE_CURSOR,
96};
97
92static int vidi_enable_vblank(struct exynos_drm_crtc *crtc) 98static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
93{ 99{
94 struct vidi_context *ctx = crtc->ctx; 100 struct vidi_context *ctx = crtc->ctx;
@@ -125,12 +131,15 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
125static void vidi_update_plane(struct exynos_drm_crtc *crtc, 131static void vidi_update_plane(struct exynos_drm_crtc *crtc,
126 struct exynos_drm_plane *plane) 132 struct exynos_drm_plane *plane)
127{ 133{
134 struct drm_plane_state *state = plane->base.state;
128 struct vidi_context *ctx = crtc->ctx; 135 struct vidi_context *ctx = crtc->ctx;
136 dma_addr_t addr;
129 137
130 if (ctx->suspended) 138 if (ctx->suspended)
131 return; 139 return;
132 140
133 DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr); 141 addr = exynos_drm_fb_dma_addr(state->fb, 0);
142 DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
134 143
135 if (ctx->vblank_on) 144 if (ctx->vblank_on)
136 schedule_work(&ctx->work); 145 schedule_work(&ctx->work);
@@ -330,7 +339,7 @@ static void vidi_connector_destroy(struct drm_connector *connector)
330{ 339{
331} 340}
332 341
333static struct drm_connector_funcs vidi_connector_funcs = { 342static const struct drm_connector_funcs vidi_connector_funcs = {
334 .dpms = drm_atomic_helper_connector_dpms, 343 .dpms = drm_atomic_helper_connector_dpms,
335 .fill_modes = drm_helper_probe_single_connector_modes, 344 .fill_modes = drm_helper_probe_single_connector_modes,
336 .detect = vidi_detect, 345 .detect = vidi_detect,
@@ -374,7 +383,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
374 return &ctx->encoder; 383 return &ctx->encoder;
375} 384}
376 385
377static struct drm_connector_helper_funcs vidi_connector_helper_funcs = { 386static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
378 .get_modes = vidi_get_modes, 387 .get_modes = vidi_get_modes,
379 .best_encoder = vidi_best_encoder, 388 .best_encoder = vidi_best_encoder,
380}; 389};
@@ -422,14 +431,14 @@ static void exynos_vidi_disable(struct drm_encoder *encoder)
422{ 431{
423} 432}
424 433
425static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = { 434static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
426 .mode_fixup = exynos_vidi_mode_fixup, 435 .mode_fixup = exynos_vidi_mode_fixup,
427 .mode_set = exynos_vidi_mode_set, 436 .mode_set = exynos_vidi_mode_set,
428 .enable = exynos_vidi_enable, 437 .enable = exynos_vidi_enable,
429 .disable = exynos_vidi_disable, 438 .disable = exynos_vidi_disable,
430}; 439};
431 440
432static struct drm_encoder_funcs exynos_vidi_encoder_funcs = { 441static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
433 .destroy = drm_encoder_cleanup, 442 .destroy = drm_encoder_cleanup,
434}; 443};
435 444
@@ -439,17 +448,21 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
439 struct drm_device *drm_dev = data; 448 struct drm_device *drm_dev = data;
440 struct drm_encoder *encoder = &ctx->encoder; 449 struct drm_encoder *encoder = &ctx->encoder;
441 struct exynos_drm_plane *exynos_plane; 450 struct exynos_drm_plane *exynos_plane;
442 enum drm_plane_type type; 451 struct exynos_drm_plane_config plane_config = { 0 };
443 unsigned int zpos; 452 unsigned int i;
444 int pipe, ret; 453 int pipe, ret;
445 454
446 vidi_ctx_initialize(ctx, drm_dev); 455 vidi_ctx_initialize(ctx, drm_dev);
447 456
448 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 457 plane_config.pixel_formats = formats;
449 type = exynos_plane_get_type(zpos, CURSOR_WIN); 458 plane_config.num_pixel_formats = ARRAY_SIZE(formats);
450 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 459
451 1 << ctx->pipe, type, formats, 460 for (i = 0; i < WINDOWS_NR; i++) {
452 ARRAY_SIZE(formats), zpos); 461 plane_config.zpos = i;
462 plane_config.type = vidi_win_types[i];
463
464 ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
465 1 << ctx->pipe, &plane_config);
453 if (ret) 466 if (ret)
454 return ret; 467 return ret;
455 } 468 }
@@ -473,7 +486,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
473 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 486 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
474 487
475 drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs, 488 drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
476 DRM_MODE_ENCODER_TMDS); 489 DRM_MODE_ENCODER_TMDS, NULL);
477 490
478 drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs); 491 drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
479 492
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 57b675563e94..21a29dbce18c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -113,7 +113,7 @@ struct hdmi_context {
113 void __iomem *regs_hdmiphy; 113 void __iomem *regs_hdmiphy;
114 struct i2c_client *hdmiphy_port; 114 struct i2c_client *hdmiphy_port;
115 struct i2c_adapter *ddc_adpt; 115 struct i2c_adapter *ddc_adpt;
116 struct gpio_desc *hpd_gpio; 116 struct gpio_desc *hpd_gpio;
117 int irq; 117 int irq;
118 struct regmap *pmureg; 118 struct regmap *pmureg;
119 struct clk *hdmi; 119 struct clk *hdmi;
@@ -956,7 +956,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
956 drm_connector_cleanup(connector); 956 drm_connector_cleanup(connector);
957} 957}
958 958
959static struct drm_connector_funcs hdmi_connector_funcs = { 959static const struct drm_connector_funcs hdmi_connector_funcs = {
960 .dpms = drm_atomic_helper_connector_dpms, 960 .dpms = drm_atomic_helper_connector_dpms,
961 .fill_modes = drm_helper_probe_single_connector_modes, 961 .fill_modes = drm_helper_probe_single_connector_modes,
962 .detect = hdmi_detect, 962 .detect = hdmi_detect,
@@ -1030,7 +1030,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
1030 return &hdata->encoder; 1030 return &hdata->encoder;
1031} 1031}
1032 1032
1033static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { 1033static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
1034 .get_modes = hdmi_get_modes, 1034 .get_modes = hdmi_get_modes,
1035 .mode_valid = hdmi_mode_valid, 1035 .mode_valid = hdmi_mode_valid,
1036 .best_encoder = hdmi_best_encoder, 1036 .best_encoder = hdmi_best_encoder,
@@ -1588,8 +1588,6 @@ static void hdmi_enable(struct drm_encoder *encoder)
1588 if (hdata->powered) 1588 if (hdata->powered)
1589 return; 1589 return;
1590 1590
1591 hdata->powered = true;
1592
1593 pm_runtime_get_sync(hdata->dev); 1591 pm_runtime_get_sync(hdata->dev);
1594 1592
1595 if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk)) 1593 if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
@@ -1599,10 +1597,9 @@ static void hdmi_enable(struct drm_encoder *encoder)
1599 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, 1597 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
1600 PMU_HDMI_PHY_ENABLE_BIT, 1); 1598 PMU_HDMI_PHY_ENABLE_BIT, 1);
1601 1599
1602 clk_prepare_enable(hdata->hdmi);
1603 clk_prepare_enable(hdata->sclk_hdmi);
1604
1605 hdmi_conf_apply(hdata); 1600 hdmi_conf_apply(hdata);
1601
1602 hdata->powered = true;
1606} 1603}
1607 1604
1608static void hdmi_disable(struct drm_encoder *encoder) 1605static void hdmi_disable(struct drm_encoder *encoder)
@@ -1633,9 +1630,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
1633 1630
1634 cancel_delayed_work(&hdata->hotplug_work); 1631 cancel_delayed_work(&hdata->hotplug_work);
1635 1632
1636 clk_disable_unprepare(hdata->sclk_hdmi);
1637 clk_disable_unprepare(hdata->hdmi);
1638
1639 /* reset pmu hdmiphy control bit to disable hdmiphy */ 1633 /* reset pmu hdmiphy control bit to disable hdmiphy */
1640 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, 1634 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
1641 PMU_HDMI_PHY_ENABLE_BIT, 0); 1635 PMU_HDMI_PHY_ENABLE_BIT, 0);
@@ -1647,14 +1641,14 @@ static void hdmi_disable(struct drm_encoder *encoder)
1647 hdata->powered = false; 1641 hdata->powered = false;
1648} 1642}
1649 1643
1650static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { 1644static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
1651 .mode_fixup = hdmi_mode_fixup, 1645 .mode_fixup = hdmi_mode_fixup,
1652 .mode_set = hdmi_mode_set, 1646 .mode_set = hdmi_mode_set,
1653 .enable = hdmi_enable, 1647 .enable = hdmi_enable,
1654 .disable = hdmi_disable, 1648 .disable = hdmi_disable,
1655}; 1649};
1656 1650
1657static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = { 1651static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
1658 .destroy = drm_encoder_cleanup, 1652 .destroy = drm_encoder_cleanup,
1659}; 1653};
1660 1654
@@ -1793,7 +1787,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
1793 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1787 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1794 1788
1795 drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs, 1789 drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
1796 DRM_MODE_ENCODER_TMDS); 1790 DRM_MODE_ENCODER_TMDS, NULL);
1797 1791
1798 drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs); 1792 drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
1799 1793
@@ -1978,12 +1972,49 @@ static int hdmi_remove(struct platform_device *pdev)
1978 return 0; 1972 return 0;
1979} 1973}
1980 1974
1975#ifdef CONFIG_PM
1976static int exynos_hdmi_suspend(struct device *dev)
1977{
1978 struct hdmi_context *hdata = dev_get_drvdata(dev);
1979
1980 clk_disable_unprepare(hdata->sclk_hdmi);
1981 clk_disable_unprepare(hdata->hdmi);
1982
1983 return 0;
1984}
1985
1986static int exynos_hdmi_resume(struct device *dev)
1987{
1988 struct hdmi_context *hdata = dev_get_drvdata(dev);
1989 int ret;
1990
1991 ret = clk_prepare_enable(hdata->hdmi);
1992 if (ret < 0) {
1993 DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
1994 return ret;
1995 }
1996 ret = clk_prepare_enable(hdata->sclk_hdmi);
1997 if (ret < 0) {
1998 DRM_ERROR("Failed to prepare_enable the sclk_mixer clk [%d]\n",
1999 ret);
2000 return ret;
2001 }
2002
2003 return 0;
2004}
2005#endif
2006
2007static const struct dev_pm_ops exynos_hdmi_pm_ops = {
2008 SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
2009};
2010
1981struct platform_driver hdmi_driver = { 2011struct platform_driver hdmi_driver = {
1982 .probe = hdmi_probe, 2012 .probe = hdmi_probe,
1983 .remove = hdmi_remove, 2013 .remove = hdmi_remove,
1984 .driver = { 2014 .driver = {
1985 .name = "exynos-hdmi", 2015 .name = "exynos-hdmi",
1986 .owner = THIS_MODULE, 2016 .owner = THIS_MODULE,
2017 .pm = &exynos_hdmi_pm_ops,
1987 .of_match_table = hdmi_match_types, 2018 .of_match_table = hdmi_match_types,
1988 }, 2019 },
1989}; 2020};
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index d09f8f9a8939..b5fbc1cbf024 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,12 +37,12 @@
37 37
38#include "exynos_drm_drv.h" 38#include "exynos_drm_drv.h"
39#include "exynos_drm_crtc.h" 39#include "exynos_drm_crtc.h"
40#include "exynos_drm_fb.h"
40#include "exynos_drm_plane.h" 41#include "exynos_drm_plane.h"
41#include "exynos_drm_iommu.h" 42#include "exynos_drm_iommu.h"
42 43
43#define MIXER_WIN_NR 3 44#define MIXER_WIN_NR 3
44#define VP_DEFAULT_WIN 2 45#define VP_DEFAULT_WIN 2
45#define CURSOR_WIN 1
46 46
47/* The pixelformats that are natively supported by the mixer. */ 47/* The pixelformats that are natively supported by the mixer. */
48#define MXR_FORMAT_RGB565 4 48#define MXR_FORMAT_RGB565 4
@@ -76,7 +76,9 @@ enum mixer_flag_bits {
76 76
77static const uint32_t mixer_formats[] = { 77static const uint32_t mixer_formats[] = {
78 DRM_FORMAT_XRGB4444, 78 DRM_FORMAT_XRGB4444,
79 DRM_FORMAT_ARGB4444,
79 DRM_FORMAT_XRGB1555, 80 DRM_FORMAT_XRGB1555,
81 DRM_FORMAT_ARGB1555,
80 DRM_FORMAT_RGB565, 82 DRM_FORMAT_RGB565,
81 DRM_FORMAT_XRGB8888, 83 DRM_FORMAT_XRGB8888,
82 DRM_FORMAT_ARGB8888, 84 DRM_FORMAT_ARGB8888,
@@ -111,6 +113,31 @@ struct mixer_drv_data {
111 bool has_sclk; 113 bool has_sclk;
112}; 114};
113 115
116static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
117 {
118 .zpos = 0,
119 .type = DRM_PLANE_TYPE_PRIMARY,
120 .pixel_formats = mixer_formats,
121 .num_pixel_formats = ARRAY_SIZE(mixer_formats),
122 .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
123 EXYNOS_DRM_PLANE_CAP_ZPOS,
124 }, {
125 .zpos = 1,
126 .type = DRM_PLANE_TYPE_CURSOR,
127 .pixel_formats = mixer_formats,
128 .num_pixel_formats = ARRAY_SIZE(mixer_formats),
129 .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
130 EXYNOS_DRM_PLANE_CAP_ZPOS,
131 }, {
132 .zpos = 2,
133 .type = DRM_PLANE_TYPE_OVERLAY,
134 .pixel_formats = vp_formats,
135 .num_pixel_formats = ARRAY_SIZE(vp_formats),
136 .capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
137 EXYNOS_DRM_PLANE_CAP_ZPOS,
138 },
139};
140
114static const u8 filter_y_horiz_tap8[] = { 141static const u8 filter_y_horiz_tap8[] = {
115 0, -1, -1, -1, -1, -1, -1, -1, 142 0, -1, -1, -1, -1, -1, -1, -1,
116 -1, -1, -1, -1, -1, 0, 0, 0, 143 -1, -1, -1, -1, -1, 0, 0, 0,
@@ -140,6 +167,18 @@ static const u8 filter_cr_horiz_tap4[] = {
140 70, 59, 48, 37, 27, 19, 11, 5, 167 70, 59, 48, 37, 27, 19, 11, 5,
141}; 168};
142 169
170static inline bool is_alpha_format(unsigned int pixel_format)
171{
172 switch (pixel_format) {
173 case DRM_FORMAT_ARGB8888:
174 case DRM_FORMAT_ARGB1555:
175 case DRM_FORMAT_ARGB4444:
176 return true;
177 default:
178 return false;
179 }
180}
181
143static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id) 182static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
144{ 183{
145 return readl(res->vp_regs + reg_id); 184 return readl(res->vp_regs + reg_id);
@@ -269,6 +308,37 @@ static void vp_default_filter(struct mixer_resources *res)
269 filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4)); 308 filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
270} 309}
271 310
311static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
312 bool alpha)
313{
314 struct mixer_resources *res = &ctx->mixer_res;
315 u32 val;
316
317 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
318 if (alpha) {
319 /* blending based on pixel alpha */
320 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
321 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
322 }
323 mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
324 val, MXR_GRP_CFG_MISC_MASK);
325}
326
327static void mixer_cfg_vp_blend(struct mixer_context *ctx)
328{
329 struct mixer_resources *res = &ctx->mixer_res;
330 u32 val;
331
332 /*
333 * No blending at the moment since the NV12/NV21 pixelformats don't
334 * have an alpha channel. However the mixer supports a global alpha
335 * value for a layer. Once this functionality is exposed, we can
336 * support blending of the video layer through this.
337 */
338 val = 0;
339 mixer_reg_write(res, MXR_VIDEO_CFG, val);
340}
341
272static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) 342static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
273{ 343{
274 struct mixer_resources *res = &ctx->mixer_res; 344 struct mixer_resources *res = &ctx->mixer_res;
@@ -350,7 +420,7 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
350} 420}
351 421
352static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, 422static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
353 bool enable) 423 unsigned int priority, bool enable)
354{ 424{
355 struct mixer_resources *res = &ctx->mixer_res; 425 struct mixer_resources *res = &ctx->mixer_res;
356 u32 val = enable ? ~0 : 0; 426 u32 val = enable ? ~0 : 0;
@@ -358,20 +428,24 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
358 switch (win) { 428 switch (win) {
359 case 0: 429 case 0:
360 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE); 430 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
431 mixer_reg_writemask(res, MXR_LAYER_CFG,
432 MXR_LAYER_CFG_GRP0_VAL(priority),
433 MXR_LAYER_CFG_GRP0_MASK);
361 break; 434 break;
362 case 1: 435 case 1:
363 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); 436 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
437 mixer_reg_writemask(res, MXR_LAYER_CFG,
438 MXR_LAYER_CFG_GRP1_VAL(priority),
439 MXR_LAYER_CFG_GRP1_MASK);
364 break; 440 break;
365 case 2: 441 case VP_DEFAULT_WIN:
366 if (ctx->vp_enabled) { 442 if (ctx->vp_enabled) {
367 vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); 443 vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
368 mixer_reg_writemask(res, MXR_CFG, val, 444 mixer_reg_writemask(res, MXR_CFG, val,
369 MXR_CFG_VP_ENABLE); 445 MXR_CFG_VP_ENABLE);
370 446 mixer_reg_writemask(res, MXR_LAYER_CFG,
371 /* control blending of graphic layer 0 */ 447 MXR_LAYER_CFG_VP_VAL(priority),
372 mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val, 448 MXR_LAYER_CFG_VP_MASK);
373 MXR_GRP_CFG_BLEND_PRE_MUL |
374 MXR_GRP_CFG_PIXEL_BLEND_EN);
375 } 449 }
376 break; 450 break;
377 } 451 }
@@ -399,10 +473,11 @@ static void mixer_stop(struct mixer_context *ctx)
399static void vp_video_buffer(struct mixer_context *ctx, 473static void vp_video_buffer(struct mixer_context *ctx,
400 struct exynos_drm_plane *plane) 474 struct exynos_drm_plane *plane)
401{ 475{
476 struct exynos_drm_plane_state *state =
477 to_exynos_plane_state(plane->base.state);
478 struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
402 struct mixer_resources *res = &ctx->mixer_res; 479 struct mixer_resources *res = &ctx->mixer_res;
403 struct drm_plane_state *state = plane->base.state; 480 struct drm_framebuffer *fb = state->base.fb;
404 struct drm_framebuffer *fb = state->fb;
405 struct drm_display_mode *mode = &state->crtc->mode;
406 unsigned long flags; 481 unsigned long flags;
407 dma_addr_t luma_addr[2], chroma_addr[2]; 482 dma_addr_t luma_addr[2], chroma_addr[2];
408 bool tiled_mode = false; 483 bool tiled_mode = false;
@@ -422,8 +497,8 @@ static void vp_video_buffer(struct mixer_context *ctx,
422 return; 497 return;
423 } 498 }
424 499
425 luma_addr[0] = plane->dma_addr[0]; 500 luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
426 chroma_addr[0] = plane->dma_addr[1]; 501 chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
427 502
428 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 503 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
429 ctx->interlace = true; 504 ctx->interlace = true;
@@ -441,7 +516,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
441 } 516 }
442 517
443 spin_lock_irqsave(&res->reg_slock, flags); 518 spin_lock_irqsave(&res->reg_slock, flags);
444 mixer_vsync_set_update(ctx, false);
445 519
446 /* interlace or progressive scan mode */ 520 /* interlace or progressive scan mode */
447 val = (ctx->interlace ? ~0 : 0); 521 val = (ctx->interlace ? ~0 : 0);
@@ -459,24 +533,24 @@ static void vp_video_buffer(struct mixer_context *ctx,
459 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | 533 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
460 VP_IMG_VSIZE(fb->height / 2)); 534 VP_IMG_VSIZE(fb->height / 2));
461 535
462 vp_reg_write(res, VP_SRC_WIDTH, plane->src_w); 536 vp_reg_write(res, VP_SRC_WIDTH, state->src.w);
463 vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h); 537 vp_reg_write(res, VP_SRC_HEIGHT, state->src.h);
464 vp_reg_write(res, VP_SRC_H_POSITION, 538 vp_reg_write(res, VP_SRC_H_POSITION,
465 VP_SRC_H_POSITION_VAL(plane->src_x)); 539 VP_SRC_H_POSITION_VAL(state->src.x));
466 vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y); 540 vp_reg_write(res, VP_SRC_V_POSITION, state->src.y);
467 541
468 vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w); 542 vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
469 vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x); 543 vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
470 if (ctx->interlace) { 544 if (ctx->interlace) {
471 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2); 545 vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
472 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2); 546 vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
473 } else { 547 } else {
474 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h); 548 vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h);
475 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y); 549 vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y);
476 } 550 }
477 551
478 vp_reg_write(res, VP_H_RATIO, plane->h_ratio); 552 vp_reg_write(res, VP_H_RATIO, state->h_ratio);
479 vp_reg_write(res, VP_V_RATIO, plane->v_ratio); 553 vp_reg_write(res, VP_V_RATIO, state->v_ratio);
480 554
481 vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE); 555 vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
482 556
@@ -488,10 +562,10 @@ static void vp_video_buffer(struct mixer_context *ctx,
488 562
489 mixer_cfg_scan(ctx, mode->vdisplay); 563 mixer_cfg_scan(ctx, mode->vdisplay);
490 mixer_cfg_rgb_fmt(ctx, mode->vdisplay); 564 mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
491 mixer_cfg_layer(ctx, plane->zpos, true); 565 mixer_cfg_layer(ctx, plane->index, state->zpos + 1, true);
566 mixer_cfg_vp_blend(ctx);
492 mixer_run(ctx); 567 mixer_run(ctx);
493 568
494 mixer_vsync_set_update(ctx, true);
495 spin_unlock_irqrestore(&res->reg_slock, flags); 569 spin_unlock_irqrestore(&res->reg_slock, flags);
496 570
497 mixer_regs_dump(ctx); 571 mixer_regs_dump(ctx);
@@ -505,39 +579,16 @@ static void mixer_layer_update(struct mixer_context *ctx)
505 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); 579 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
506} 580}
507 581
508static int mixer_setup_scale(const struct exynos_drm_plane *plane,
509 unsigned int *x_ratio, unsigned int *y_ratio)
510{
511 if (plane->crtc_w != plane->src_w) {
512 if (plane->crtc_w == 2 * plane->src_w)
513 *x_ratio = 1;
514 else
515 goto fail;
516 }
517
518 if (plane->crtc_h != plane->src_h) {
519 if (plane->crtc_h == 2 * plane->src_h)
520 *y_ratio = 1;
521 else
522 goto fail;
523 }
524
525 return 0;
526
527fail:
528 DRM_DEBUG_KMS("only 2x width/height scaling of plane supported\n");
529 return -ENOTSUPP;
530}
531
532static void mixer_graph_buffer(struct mixer_context *ctx, 582static void mixer_graph_buffer(struct mixer_context *ctx,
533 struct exynos_drm_plane *plane) 583 struct exynos_drm_plane *plane)
534{ 584{
585 struct exynos_drm_plane_state *state =
586 to_exynos_plane_state(plane->base.state);
587 struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
535 struct mixer_resources *res = &ctx->mixer_res; 588 struct mixer_resources *res = &ctx->mixer_res;
536 struct drm_plane_state *state = plane->base.state; 589 struct drm_framebuffer *fb = state->base.fb;
537 struct drm_framebuffer *fb = state->fb;
538 struct drm_display_mode *mode = &state->crtc->mode;
539 unsigned long flags; 590 unsigned long flags;
540 unsigned int win = plane->zpos; 591 unsigned int win = plane->index;
541 unsigned int x_ratio = 0, y_ratio = 0; 592 unsigned int x_ratio = 0, y_ratio = 0;
542 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset; 593 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
543 dma_addr_t dma_addr; 594 dma_addr_t dma_addr;
@@ -546,10 +597,12 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
546 597
547 switch (fb->pixel_format) { 598 switch (fb->pixel_format) {
548 case DRM_FORMAT_XRGB4444: 599 case DRM_FORMAT_XRGB4444:
600 case DRM_FORMAT_ARGB4444:
549 fmt = MXR_FORMAT_ARGB4444; 601 fmt = MXR_FORMAT_ARGB4444;
550 break; 602 break;
551 603
552 case DRM_FORMAT_XRGB1555: 604 case DRM_FORMAT_XRGB1555:
605 case DRM_FORMAT_ARGB1555:
553 fmt = MXR_FORMAT_ARGB1555; 606 fmt = MXR_FORMAT_ARGB1555;
554 break; 607 break;
555 608
@@ -567,17 +620,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
567 return; 620 return;
568 } 621 }
569 622
570 /* check if mixer supports requested scaling setup */ 623 /* ratio is already checked by common plane code */
571 if (mixer_setup_scale(plane, &x_ratio, &y_ratio)) 624 x_ratio = state->h_ratio == (1 << 15);
572 return; 625 y_ratio = state->v_ratio == (1 << 15);
573 626
574 dst_x_offset = plane->crtc_x; 627 dst_x_offset = state->crtc.x;
575 dst_y_offset = plane->crtc_y; 628 dst_y_offset = state->crtc.y;
576 629
577 /* converting dma address base and source offset */ 630 /* converting dma address base and source offset */
578 dma_addr = plane->dma_addr[0] 631 dma_addr = exynos_drm_fb_dma_addr(fb, 0)
579 + (plane->src_x * fb->bits_per_pixel >> 3) 632 + (state->src.x * fb->bits_per_pixel >> 3)
580 + (plane->src_y * fb->pitches[0]); 633 + (state->src.y * fb->pitches[0]);
581 src_x_offset = 0; 634 src_x_offset = 0;
582 src_y_offset = 0; 635 src_y_offset = 0;
583 636
@@ -587,7 +640,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
587 ctx->interlace = false; 640 ctx->interlace = false;
588 641
589 spin_lock_irqsave(&res->reg_slock, flags); 642 spin_lock_irqsave(&res->reg_slock, flags);
590 mixer_vsync_set_update(ctx, false);
591 643
592 /* setup format */ 644 /* setup format */
593 mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win), 645 mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
@@ -605,8 +657,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
605 mixer_reg_write(res, MXR_RESOLUTION, val); 657 mixer_reg_write(res, MXR_RESOLUTION, val);
606 } 658 }
607 659
608 val = MXR_GRP_WH_WIDTH(plane->src_w); 660 val = MXR_GRP_WH_WIDTH(state->src.w);
609 val |= MXR_GRP_WH_HEIGHT(plane->src_h); 661 val |= MXR_GRP_WH_HEIGHT(state->src.h);
610 val |= MXR_GRP_WH_H_SCALE(x_ratio); 662 val |= MXR_GRP_WH_H_SCALE(x_ratio);
611 val |= MXR_GRP_WH_V_SCALE(y_ratio); 663 val |= MXR_GRP_WH_V_SCALE(y_ratio);
612 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); 664 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -626,7 +678,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
626 678
627 mixer_cfg_scan(ctx, mode->vdisplay); 679 mixer_cfg_scan(ctx, mode->vdisplay);
628 mixer_cfg_rgb_fmt(ctx, mode->vdisplay); 680 mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
629 mixer_cfg_layer(ctx, win, true); 681 mixer_cfg_layer(ctx, win, state->zpos + 1, true);
682 mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->pixel_format));
630 683
631 /* layer update mandatory for mixer 16.0.33.0 */ 684 /* layer update mandatory for mixer 16.0.33.0 */
632 if (ctx->mxr_ver == MXR_VER_16_0_33_0 || 685 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
@@ -635,7 +688,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
635 688
636 mixer_run(ctx); 689 mixer_run(ctx);
637 690
638 mixer_vsync_set_update(ctx, true);
639 spin_unlock_irqrestore(&res->reg_slock, flags); 691 spin_unlock_irqrestore(&res->reg_slock, flags);
640 692
641 mixer_regs_dump(ctx); 693 mixer_regs_dump(ctx);
@@ -660,10 +712,8 @@ static void mixer_win_reset(struct mixer_context *ctx)
660{ 712{
661 struct mixer_resources *res = &ctx->mixer_res; 713 struct mixer_resources *res = &ctx->mixer_res;
662 unsigned long flags; 714 unsigned long flags;
663 u32 val; /* value stored to register */
664 715
665 spin_lock_irqsave(&res->reg_slock, flags); 716 spin_lock_irqsave(&res->reg_slock, flags);
666 mixer_vsync_set_update(ctx, false);
667 717
668 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK); 718 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
669 719
@@ -674,40 +724,14 @@ static void mixer_win_reset(struct mixer_context *ctx)
674 mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST, 724 mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
675 MXR_STATUS_BURST_MASK); 725 MXR_STATUS_BURST_MASK);
676 726
677 /* setting default layer priority: layer1 > layer0 > video 727 /* reset default layer priority */
678 * because typical usage scenario would be 728 mixer_reg_write(res, MXR_LAYER_CFG, 0);
679 * layer1 - OSD
680 * layer0 - framebuffer
681 * video - video overlay
682 */
683 val = MXR_LAYER_CFG_GRP1_VAL(3);
684 val |= MXR_LAYER_CFG_GRP0_VAL(2);
685 if (ctx->vp_enabled)
686 val |= MXR_LAYER_CFG_VP_VAL(1);
687 mixer_reg_write(res, MXR_LAYER_CFG, val);
688 729
689 /* setting background color */ 730 /* setting background color */
690 mixer_reg_write(res, MXR_BG_COLOR0, 0x008080); 731 mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
691 mixer_reg_write(res, MXR_BG_COLOR1, 0x008080); 732 mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
692 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); 733 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
693 734
694 /* setting graphical layers */
695 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
696 val |= MXR_GRP_CFG_WIN_BLEND_EN;
697 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
698
699 /* Don't blend layer 0 onto the mixer background */
700 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
701
702 /* Blend layer 1 into layer 0 */
703 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
704 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
705 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
706
707 /* setting video layers */
708 val = MXR_GRP_CFG_ALPHA_VAL(0);
709 mixer_reg_write(res, MXR_VIDEO_CFG, val);
710
711 if (ctx->vp_enabled) { 735 if (ctx->vp_enabled) {
712 /* configuration of Video Processor Registers */ 736 /* configuration of Video Processor Registers */
713 vp_win_reset(ctx); 737 vp_win_reset(ctx);
@@ -720,7 +744,6 @@ static void mixer_win_reset(struct mixer_context *ctx)
720 if (ctx->vp_enabled) 744 if (ctx->vp_enabled)
721 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); 745 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
722 746
723 mixer_vsync_set_update(ctx, true);
724 spin_unlock_irqrestore(&res->reg_slock, flags); 747 spin_unlock_irqrestore(&res->reg_slock, flags);
725} 748}
726 749
@@ -951,17 +974,27 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
951 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 974 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
952} 975}
953 976
977static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
978{
979 struct mixer_context *mixer_ctx = crtc->ctx;
980
981 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
982 return;
983
984 mixer_vsync_set_update(mixer_ctx, false);
985}
986
954static void mixer_update_plane(struct exynos_drm_crtc *crtc, 987static void mixer_update_plane(struct exynos_drm_crtc *crtc,
955 struct exynos_drm_plane *plane) 988 struct exynos_drm_plane *plane)
956{ 989{
957 struct mixer_context *mixer_ctx = crtc->ctx; 990 struct mixer_context *mixer_ctx = crtc->ctx;
958 991
959 DRM_DEBUG_KMS("win: %d\n", plane->zpos); 992 DRM_DEBUG_KMS("win: %d\n", plane->index);
960 993
961 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) 994 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
962 return; 995 return;
963 996
964 if (plane->zpos > 1 && mixer_ctx->vp_enabled) 997 if (plane->index == VP_DEFAULT_WIN)
965 vp_video_buffer(mixer_ctx, plane); 998 vp_video_buffer(mixer_ctx, plane);
966 else 999 else
967 mixer_graph_buffer(mixer_ctx, plane); 1000 mixer_graph_buffer(mixer_ctx, plane);
@@ -974,18 +1007,24 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
974 struct mixer_resources *res = &mixer_ctx->mixer_res; 1007 struct mixer_resources *res = &mixer_ctx->mixer_res;
975 unsigned long flags; 1008 unsigned long flags;
976 1009
977 DRM_DEBUG_KMS("win: %d\n", plane->zpos); 1010 DRM_DEBUG_KMS("win: %d\n", plane->index);
978 1011
979 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) 1012 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
980 return; 1013 return;
981 1014
982 spin_lock_irqsave(&res->reg_slock, flags); 1015 spin_lock_irqsave(&res->reg_slock, flags);
983 mixer_vsync_set_update(mixer_ctx, false); 1016 mixer_cfg_layer(mixer_ctx, plane->index, 0, false);
1017 spin_unlock_irqrestore(&res->reg_slock, flags);
1018}
984 1019
985 mixer_cfg_layer(mixer_ctx, plane->zpos, false); 1020static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
1021{
1022 struct mixer_context *mixer_ctx = crtc->ctx;
1023
1024 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
1025 return;
986 1026
987 mixer_vsync_set_update(mixer_ctx, true); 1027 mixer_vsync_set_update(mixer_ctx, true);
988 spin_unlock_irqrestore(&res->reg_slock, flags);
989} 1028}
990 1029
991static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc) 1030static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
@@ -1020,42 +1059,13 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1020{ 1059{
1021 struct mixer_context *ctx = crtc->ctx; 1060 struct mixer_context *ctx = crtc->ctx;
1022 struct mixer_resources *res = &ctx->mixer_res; 1061 struct mixer_resources *res = &ctx->mixer_res;
1023 int ret;
1024 1062
1025 if (test_bit(MXR_BIT_POWERED, &ctx->flags)) 1063 if (test_bit(MXR_BIT_POWERED, &ctx->flags))
1026 return; 1064 return;
1027 1065
1028 pm_runtime_get_sync(ctx->dev); 1066 pm_runtime_get_sync(ctx->dev);
1029 1067
1030 ret = clk_prepare_enable(res->mixer); 1068 mixer_vsync_set_update(ctx, false);
1031 if (ret < 0) {
1032 DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
1033 return;
1034 }
1035 ret = clk_prepare_enable(res->hdmi);
1036 if (ret < 0) {
1037 DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
1038 return;
1039 }
1040 if (ctx->vp_enabled) {
1041 ret = clk_prepare_enable(res->vp);
1042 if (ret < 0) {
1043 DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
1044 ret);
1045 return;
1046 }
1047 if (ctx->has_sclk) {
1048 ret = clk_prepare_enable(res->sclk_mixer);
1049 if (ret < 0) {
1050 DRM_ERROR("Failed to prepare_enable the " \
1051 "sclk_mixer clk [%d]\n",
1052 ret);
1053 return;
1054 }
1055 }
1056 }
1057
1058 set_bit(MXR_BIT_POWERED, &ctx->flags);
1059 1069
1060 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); 1070 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1061 1071
@@ -1064,12 +1074,15 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1064 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); 1074 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
1065 } 1075 }
1066 mixer_win_reset(ctx); 1076 mixer_win_reset(ctx);
1077
1078 mixer_vsync_set_update(ctx, true);
1079
1080 set_bit(MXR_BIT_POWERED, &ctx->flags);
1067} 1081}
1068 1082
1069static void mixer_disable(struct exynos_drm_crtc *crtc) 1083static void mixer_disable(struct exynos_drm_crtc *crtc)
1070{ 1084{
1071 struct mixer_context *ctx = crtc->ctx; 1085 struct mixer_context *ctx = crtc->ctx;
1072 struct mixer_resources *res = &ctx->mixer_res;
1073 int i; 1086 int i;
1074 1087
1075 if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) 1088 if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
@@ -1081,17 +1094,9 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
1081 for (i = 0; i < MIXER_WIN_NR; i++) 1094 for (i = 0; i < MIXER_WIN_NR; i++)
1082 mixer_disable_plane(crtc, &ctx->planes[i]); 1095 mixer_disable_plane(crtc, &ctx->planes[i]);
1083 1096
1084 clear_bit(MXR_BIT_POWERED, &ctx->flags); 1097 pm_runtime_put(ctx->dev);
1085 1098
1086 clk_disable_unprepare(res->hdmi); 1099 clear_bit(MXR_BIT_POWERED, &ctx->flags);
1087 clk_disable_unprepare(res->mixer);
1088 if (ctx->vp_enabled) {
1089 clk_disable_unprepare(res->vp);
1090 if (ctx->has_sclk)
1091 clk_disable_unprepare(res->sclk_mixer);
1092 }
1093
1094 pm_runtime_put_sync(ctx->dev);
1095} 1100}
1096 1101
1097/* Only valid for Mixer version 16.0.33.0 */ 1102/* Only valid for Mixer version 16.0.33.0 */
@@ -1122,8 +1127,10 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
1122 .enable_vblank = mixer_enable_vblank, 1127 .enable_vblank = mixer_enable_vblank,
1123 .disable_vblank = mixer_disable_vblank, 1128 .disable_vblank = mixer_disable_vblank,
1124 .wait_for_vblank = mixer_wait_for_vblank, 1129 .wait_for_vblank = mixer_wait_for_vblank,
1130 .atomic_begin = mixer_atomic_begin,
1125 .update_plane = mixer_update_plane, 1131 .update_plane = mixer_update_plane,
1126 .disable_plane = mixer_disable_plane, 1132 .disable_plane = mixer_disable_plane,
1133 .atomic_flush = mixer_atomic_flush,
1127 .atomic_check = mixer_atomic_check, 1134 .atomic_check = mixer_atomic_check,
1128}; 1135};
1129 1136
@@ -1187,30 +1194,19 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1187 struct mixer_context *ctx = dev_get_drvdata(dev); 1194 struct mixer_context *ctx = dev_get_drvdata(dev);
1188 struct drm_device *drm_dev = data; 1195 struct drm_device *drm_dev = data;
1189 struct exynos_drm_plane *exynos_plane; 1196 struct exynos_drm_plane *exynos_plane;
1190 unsigned int zpos; 1197 unsigned int i;
1191 int ret; 1198 int ret;
1192 1199
1193 ret = mixer_initialize(ctx, drm_dev); 1200 ret = mixer_initialize(ctx, drm_dev);
1194 if (ret) 1201 if (ret)
1195 return ret; 1202 return ret;
1196 1203
1197 for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) { 1204 for (i = 0; i < MIXER_WIN_NR; i++) {
1198 enum drm_plane_type type; 1205 if (i == VP_DEFAULT_WIN && !ctx->vp_enabled)
1199 const uint32_t *formats; 1206 continue;
1200 unsigned int fcount;
1201
1202 if (zpos < VP_DEFAULT_WIN) {
1203 formats = mixer_formats;
1204 fcount = ARRAY_SIZE(mixer_formats);
1205 } else {
1206 formats = vp_formats;
1207 fcount = ARRAY_SIZE(vp_formats);
1208 }
1209 1207
1210 type = exynos_plane_get_type(zpos, CURSOR_WIN); 1208 ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
1211 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 1209 1 << ctx->pipe, &plane_configs[i]);
1212 1 << ctx->pipe, type, formats, fcount,
1213 zpos);
1214 if (ret) 1210 if (ret)
1215 return ret; 1211 return ret;
1216 } 1212 }
@@ -1293,10 +1289,70 @@ static int mixer_remove(struct platform_device *pdev)
1293 return 0; 1289 return 0;
1294} 1290}
1295 1291
1292#ifdef CONFIG_PM_SLEEP
1293static int exynos_mixer_suspend(struct device *dev)
1294{
1295 struct mixer_context *ctx = dev_get_drvdata(dev);
1296 struct mixer_resources *res = &ctx->mixer_res;
1297
1298 clk_disable_unprepare(res->hdmi);
1299 clk_disable_unprepare(res->mixer);
1300 if (ctx->vp_enabled) {
1301 clk_disable_unprepare(res->vp);
1302 if (ctx->has_sclk)
1303 clk_disable_unprepare(res->sclk_mixer);
1304 }
1305
1306 return 0;
1307}
1308
1309static int exynos_mixer_resume(struct device *dev)
1310{
1311 struct mixer_context *ctx = dev_get_drvdata(dev);
1312 struct mixer_resources *res = &ctx->mixer_res;
1313 int ret;
1314
1315 ret = clk_prepare_enable(res->mixer);
1316 if (ret < 0) {
1317 DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
1318 return ret;
1319 }
1320 ret = clk_prepare_enable(res->hdmi);
1321 if (ret < 0) {
1322 DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
1323 return ret;
1324 }
1325 if (ctx->vp_enabled) {
1326 ret = clk_prepare_enable(res->vp);
1327 if (ret < 0) {
1328 DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
1329 ret);
1330 return ret;
1331 }
1332 if (ctx->has_sclk) {
1333 ret = clk_prepare_enable(res->sclk_mixer);
1334 if (ret < 0) {
1335 DRM_ERROR("Failed to prepare_enable the " \
1336 "sclk_mixer clk [%d]\n",
1337 ret);
1338 return ret;
1339 }
1340 }
1341 }
1342
1343 return 0;
1344}
1345#endif
1346
1347static const struct dev_pm_ops exynos_mixer_pm_ops = {
1348 SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
1349};
1350
1296struct platform_driver mixer_driver = { 1351struct platform_driver mixer_driver = {
1297 .driver = { 1352 .driver = {
1298 .name = "exynos-mixer", 1353 .name = "exynos-mixer",
1299 .owner = THIS_MODULE, 1354 .owner = THIS_MODULE,
1355 .pm = &exynos_mixer_pm_ops,
1300 .of_match_table = mixer_match_types, 1356 .of_match_table = mixer_match_types,
1301 }, 1357 },
1302 .probe = mixer_probe, 1358 .probe = mixer_probe,
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 9ad592707aaf..4704a993cbb7 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -273,12 +273,12 @@
273#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0) 273#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
274 274
275/* SYSCON. GSCBLK_CFG */ 275/* SYSCON. GSCBLK_CFG */
276#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224) 276#define SYSREG_GSCBLK_CFG1 0x0224
277#define GSC_BLK_DISP1WB_DEST(x) (x << 10) 277#define GSC_BLK_DISP1WB_DEST(x) (x << 10)
278#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x)) 278#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x))
279#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x)) 279#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x))
280#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x)) 280#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x))
281#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000) 281#define SYSREG_GSCBLK_CFG2 0x2000
282#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x)) 282#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
283 283
284#endif /* EXYNOS_REGS_GSC_H_ */ 284#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index ac60260c2389..7f22df5bf707 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -113,6 +113,7 @@
113#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20) 113#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
114#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17) 114#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17)
115#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16) 115#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16)
116#define MXR_GRP_CFG_MISC_MASK ((3 << 16) | (3 << 20))
116#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8) 117#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
117#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0) 118#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
118#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0) 119#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
@@ -145,8 +146,11 @@
145 146
146/* bit for MXR_LAYER_CFG */ 147/* bit for MXR_LAYER_CFG */
147#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8) 148#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
149#define MXR_LAYER_CFG_GRP1_MASK MXR_LAYER_CFG_GRP1_VAL(~0)
148#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4) 150#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
151#define MXR_LAYER_CFG_GRP0_MASK MXR_LAYER_CFG_GRP0_VAL(~0)
149#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0) 152#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
153#define MXR_LAYER_CFG_VP_MASK MXR_LAYER_CFG_VP_VAL(~0)
150 154
151#endif /* SAMSUNG_REGS_MIXER_H */ 155#endif /* SAMSUNG_REGS_MIXER_H */
152 156
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 82a3d311e164..d8ab8f0af10c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -175,7 +175,7 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
175 175
176 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); 176 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
177 ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL, 177 ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
178 &fsl_dcu_drm_crtc_funcs); 178 &fsl_dcu_drm_crtc_funcs, NULL);
179 if (ret < 0) 179 if (ret < 0)
180 return ret; 180 return ret;
181 181
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 1930234ba5f1..fca97d3fc846 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -363,7 +363,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
363 fsl_dev->np = dev->of_node; 363 fsl_dev->np = dev->of_node;
364 drm->dev_private = fsl_dev; 364 drm->dev_private = fsl_dev;
365 dev_set_drvdata(dev, fsl_dev); 365 dev_set_drvdata(dev, fsl_dev);
366 drm_dev_set_unique(drm, dev_name(dev));
367 366
368 ret = drm_dev_register(drm, 0); 367 ret = drm_dev_register(drm, 0);
369 if (ret < 0) 368 if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 51daaea40b4d..4b13cf919575 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -249,7 +249,7 @@ struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
249 &fsl_dcu_drm_plane_funcs, 249 &fsl_dcu_drm_plane_funcs,
250 fsl_dcu_drm_plane_formats, 250 fsl_dcu_drm_plane_formats,
251 ARRAY_SIZE(fsl_dcu_drm_plane_formats), 251 ARRAY_SIZE(fsl_dcu_drm_plane_formats),
252 DRM_PLANE_TYPE_PRIMARY); 252 DRM_PLANE_TYPE_PRIMARY, NULL);
253 if (ret) { 253 if (ret) {
254 kfree(primary); 254 kfree(primary);
255 primary = NULL; 255 primary = NULL;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index fe8ab5da04fb..8780deba5e8a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -57,7 +57,7 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
57 57
58 encoder->possible_crtcs = 1; 58 encoder->possible_crtcs = 1;
59 ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, 59 ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
60 DRM_MODE_ENCODER_LVDS); 60 DRM_MODE_ENCODER_LVDS, NULL);
61 if (ret < 0) 61 if (ret < 0)
62 return ret; 62 return ret;
63 63
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3531f90e53d0..8745971a7680 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -619,6 +619,8 @@ const struct psb_ops cdv_chip_ops = {
619 .init_pm = cdv_init_pm, 619 .init_pm = cdv_init_pm,
620 .save_regs = cdv_save_display_registers, 620 .save_regs = cdv_save_display_registers,
621 .restore_regs = cdv_restore_display_registers, 621 .restore_regs = cdv_restore_display_registers,
622 .save_crtc = gma_crtc_save,
623 .restore_crtc = gma_crtc_restore,
622 .power_down = cdv_power_down, 624 .power_down = cdv_power_down,
623 .power_up = cdv_power_up, 625 .power_up = cdv_power_up,
624 .update_wm = cdv_update_wm, 626 .update_wm = cdv_update_wm,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 248c33a35ebf..d0717a85c7ec 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -273,7 +273,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
273 273
274 encoder = &gma_encoder->base; 274 encoder = &gma_encoder->base;
275 drm_encoder_init(dev, encoder, 275 drm_encoder_init(dev, encoder,
276 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); 276 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
277 277
278 gma_connector_attach_encoder(gma_connector, gma_encoder); 278 gma_connector_attach_encoder(gma_connector, gma_encoder);
279 279
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 7d47b3d5cc0d..6126546295e9 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -983,8 +983,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
983}; 983};
984 984
985const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 985const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
986 .save = gma_crtc_save,
987 .restore = gma_crtc_restore,
988 .cursor_set = gma_crtc_cursor_set, 986 .cursor_set = gma_crtc_cursor_set,
989 .cursor_move = gma_crtc_cursor_move, 987 .cursor_move = gma_crtc_cursor_move,
990 .gamma_set = gma_crtc_gamma_set, 988 .gamma_set = gma_crtc_gamma_set,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 17cea400ae32..7bb1f1aff932 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -2020,7 +2020,8 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
2020 encoder = &gma_encoder->base; 2020 encoder = &gma_encoder->base;
2021 2021
2022 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); 2022 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
2023 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); 2023 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
2024 DRM_MODE_ENCODER_TMDS, NULL);
2024 2025
2025 gma_connector_attach_encoder(gma_connector, gma_encoder); 2026 gma_connector_attach_encoder(gma_connector, gma_encoder);
2026 2027
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 6b1d3340ba14..ddf2d7700759 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -270,8 +270,6 @@ static const struct drm_connector_helper_funcs
270 270
271static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { 271static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
272 .dpms = drm_helper_connector_dpms, 272 .dpms = drm_helper_connector_dpms,
273 .save = cdv_hdmi_save,
274 .restore = cdv_hdmi_restore,
275 .detect = cdv_hdmi_detect, 273 .detect = cdv_hdmi_detect,
276 .fill_modes = drm_helper_probe_single_connector_modes, 274 .fill_modes = drm_helper_probe_single_connector_modes,
277 .set_property = cdv_hdmi_set_property, 275 .set_property = cdv_hdmi_set_property,
@@ -306,13 +304,16 @@ void cdv_hdmi_init(struct drm_device *dev,
306 304
307 connector = &gma_connector->base; 305 connector = &gma_connector->base;
308 connector->polled = DRM_CONNECTOR_POLL_HPD; 306 connector->polled = DRM_CONNECTOR_POLL_HPD;
307 gma_connector->save = cdv_hdmi_save;
308 gma_connector->restore = cdv_hdmi_restore;
309
309 encoder = &gma_encoder->base; 310 encoder = &gma_encoder->base;
310 drm_connector_init(dev, connector, 311 drm_connector_init(dev, connector,
311 &cdv_hdmi_connector_funcs, 312 &cdv_hdmi_connector_funcs,
312 DRM_MODE_CONNECTOR_DVID); 313 DRM_MODE_CONNECTOR_DVID);
313 314
314 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 315 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
315 DRM_MODE_ENCODER_TMDS); 316 DRM_MODE_ENCODER_TMDS, NULL);
316 317
317 gma_connector_attach_encoder(gma_connector, gma_encoder); 318 gma_connector_attach_encoder(gma_connector, gma_encoder);
318 gma_encoder->type = INTEL_OUTPUT_HDMI; 319 gma_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 211069b2b951..813ef23a8054 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -530,8 +530,6 @@ static const struct drm_connector_helper_funcs
530 530
531static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { 531static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
532 .dpms = drm_helper_connector_dpms, 532 .dpms = drm_helper_connector_dpms,
533 .save = cdv_intel_lvds_save,
534 .restore = cdv_intel_lvds_restore,
535 .detect = cdv_intel_lvds_detect, 533 .detect = cdv_intel_lvds_detect,
536 .fill_modes = drm_helper_probe_single_connector_modes, 534 .fill_modes = drm_helper_probe_single_connector_modes,
537 .set_property = cdv_intel_lvds_set_property, 535 .set_property = cdv_intel_lvds_set_property,
@@ -643,6 +641,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
643 gma_encoder->dev_priv = lvds_priv; 641 gma_encoder->dev_priv = lvds_priv;
644 642
645 connector = &gma_connector->base; 643 connector = &gma_connector->base;
644 gma_connector->save = cdv_intel_lvds_save;
645 gma_connector->restore = cdv_intel_lvds_restore;
646 encoder = &gma_encoder->base; 646 encoder = &gma_encoder->base;
647 647
648 648
@@ -652,7 +652,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
652 652
653 drm_encoder_init(dev, encoder, 653 drm_encoder_init(dev, encoder,
654 &cdv_intel_lvds_enc_funcs, 654 &cdv_intel_lvds_enc_funcs,
655 DRM_MODE_ENCODER_LVDS); 655 DRM_MODE_ENCODER_LVDS, NULL);
656 656
657 657
658 gma_connector_attach_encoder(gma_connector, gma_encoder); 658 gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index 265ad0de44a6..e2ab858122f9 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -546,6 +546,8 @@ const struct psb_ops mdfld_chip_ops = {
546 546
547 .save_regs = mdfld_save_registers, 547 .save_regs = mdfld_save_registers,
548 .restore_regs = mdfld_restore_registers, 548 .restore_regs = mdfld_restore_registers,
549 .save_crtc = gma_crtc_save,
550 .restore_crtc = gma_crtc_restore,
549 .power_down = mdfld_power_down, 551 .power_down = mdfld_power_down,
550 .power_up = mdfld_power_up, 552 .power_up = mdfld_power_up,
551}; 553};
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4813e03f5ee..7cd87a0c2385 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -821,14 +821,18 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
821 struct drm_device *dev = dsi_config->dev; 821 struct drm_device *dev = dsi_config->dev;
822 struct drm_psb_private *dev_priv = dev->dev_private; 822 struct drm_psb_private *dev_priv = dev->dev_private;
823 int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder); 823 int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
824
825 u32 pipeconf_reg = PIPEACONF; 824 u32 pipeconf_reg = PIPEACONF;
826 u32 dspcntr_reg = DSPACNTR; 825 u32 dspcntr_reg = DSPACNTR;
826 u32 pipeconf, dspcntr;
827 827
828 u32 pipeconf = dev_priv->pipeconf[pipe];
829 u32 dspcntr = dev_priv->dspcntr[pipe];
830 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; 828 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
831 829
830 if (WARN_ON(pipe < 0))
831 return;
832
833 pipeconf = dev_priv->pipeconf[pipe];
834 dspcntr = dev_priv->dspcntr[pipe];
835
832 if (pipe) { 836 if (pipe) {
833 pipeconf_reg = PIPECCONF; 837 pipeconf_reg = PIPECCONF;
834 dspcntr_reg = DSPCCNTR; 838 dspcntr_reg = DSPCCNTR;
@@ -994,7 +998,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
994 drm_encoder_init(dev, 998 drm_encoder_init(dev,
995 encoder, 999 encoder,
996 p_funcs->encoder_funcs, 1000 p_funcs->encoder_funcs,
997 DRM_MODE_ENCODER_LVDS); 1001 DRM_MODE_ENCODER_LVDS, NULL);
998 drm_encoder_helper_add(encoder, 1002 drm_encoder_helper_add(encoder,
999 p_funcs->encoder_helper_funcs); 1003 p_funcs->encoder_helper_funcs);
1000 1004
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 89f705c3a5eb..d758f4cc6805 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -405,8 +405,6 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
405/*DSI connector funcs*/ 405/*DSI connector funcs*/
406static const struct drm_connector_funcs mdfld_dsi_connector_funcs = { 406static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
407 .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms, 407 .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
408 .save = mdfld_dsi_connector_save,
409 .restore = mdfld_dsi_connector_restore,
410 .detect = mdfld_dsi_connector_detect, 408 .detect = mdfld_dsi_connector_detect,
411 .fill_modes = drm_helper_probe_single_connector_modes, 409 .fill_modes = drm_helper_probe_single_connector_modes,
412 .set_property = mdfld_dsi_connector_set_property, 410 .set_property = mdfld_dsi_connector_set_property,
@@ -563,6 +561,9 @@ void mdfld_dsi_output_init(struct drm_device *dev,
563 561
564 562
565 connector = &dsi_connector->base.base; 563 connector = &dsi_connector->base.base;
564 dsi_connector->base.save = mdfld_dsi_connector_save;
565 dsi_connector->base.restore = mdfld_dsi_connector_restore;
566
566 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, 567 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
567 DRM_MODE_CONNECTOR_LVDS); 568 DRM_MODE_CONNECTOR_LVDS);
568 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); 569 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 368a03ae3010..ba30b43a3412 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -568,6 +568,8 @@ const struct psb_ops oaktrail_chip_ops = {
568 568
569 .save_regs = oaktrail_save_display_registers, 569 .save_regs = oaktrail_save_display_registers,
570 .restore_regs = oaktrail_restore_display_registers, 570 .restore_regs = oaktrail_restore_display_registers,
571 .save_crtc = gma_crtc_save,
572 .restore_crtc = gma_crtc_restore,
571 .power_down = oaktrail_power_down, 573 .power_down = oaktrail_power_down,
572 .power_up = oaktrail_power_up, 574 .power_up = oaktrail_power_up,
573 575
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2310d879cdc2..2d18499d6060 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -654,7 +654,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
654 654
655 drm_encoder_init(dev, encoder, 655 drm_encoder_init(dev, encoder,
656 &oaktrail_hdmi_enc_funcs, 656 &oaktrail_hdmi_enc_funcs,
657 DRM_MODE_ENCODER_TMDS); 657 DRM_MODE_ENCODER_TMDS, NULL);
658 658
659 gma_connector_attach_encoder(gma_connector, gma_encoder); 659 gma_connector_attach_encoder(gma_connector, gma_encoder);
660 660
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 83bbc271bcfb..f7038f12ac76 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -323,7 +323,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
323 DRM_MODE_CONNECTOR_LVDS); 323 DRM_MODE_CONNECTOR_LVDS);
324 324
325 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 325 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
326 DRM_MODE_ENCODER_LVDS); 326 DRM_MODE_ENCODER_LVDS, NULL);
327 327
328 gma_connector_attach_encoder(gma_connector, gma_encoder); 328 gma_connector_attach_encoder(gma_connector, gma_encoder);
329 gma_encoder->type = INTEL_OUTPUT_LVDS; 329 gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index b6b135fcd59c..bea8578846d1 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -187,7 +187,7 @@ static bool gma_resume_pci(struct pci_dev *pdev)
187 */ 187 */
188int gma_power_suspend(struct device *_dev) 188int gma_power_suspend(struct device *_dev)
189{ 189{
190 struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev); 190 struct pci_dev *pdev = to_pci_dev(_dev);
191 struct drm_device *dev = pci_get_drvdata(pdev); 191 struct drm_device *dev = pci_get_drvdata(pdev);
192 struct drm_psb_private *dev_priv = dev->dev_private; 192 struct drm_psb_private *dev_priv = dev->dev_private;
193 193
@@ -214,7 +214,7 @@ int gma_power_suspend(struct device *_dev)
214 */ 214 */
215int gma_power_resume(struct device *_dev) 215int gma_power_resume(struct device *_dev)
216{ 216{
217 struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev); 217 struct pci_dev *pdev = to_pci_dev(_dev);
218 struct drm_device *dev = pci_get_drvdata(pdev); 218 struct drm_device *dev = pci_get_drvdata(pdev);
219 219
220 mutex_lock(&power_mutex); 220 mutex_lock(&power_mutex);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 07df7d4eea72..dc0f8527570c 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -181,7 +181,7 @@ static int psb_save_display_registers(struct drm_device *dev)
181{ 181{
182 struct drm_psb_private *dev_priv = dev->dev_private; 182 struct drm_psb_private *dev_priv = dev->dev_private;
183 struct drm_crtc *crtc; 183 struct drm_crtc *crtc;
184 struct drm_connector *connector; 184 struct gma_connector *connector;
185 struct psb_state *regs = &dev_priv->regs.psb; 185 struct psb_state *regs = &dev_priv->regs.psb;
186 186
187 /* Display arbitration control + watermarks */ 187 /* Display arbitration control + watermarks */
@@ -198,12 +198,12 @@ static int psb_save_display_registers(struct drm_device *dev)
198 drm_modeset_lock_all(dev); 198 drm_modeset_lock_all(dev);
199 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 199 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
200 if (drm_helper_crtc_in_use(crtc)) 200 if (drm_helper_crtc_in_use(crtc))
201 crtc->funcs->save(crtc); 201 dev_priv->ops->save_crtc(crtc);
202 } 202 }
203 203
204 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 204 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
205 if (connector->funcs->save) 205 if (connector->save)
206 connector->funcs->save(connector); 206 connector->save(&connector->base);
207 207
208 drm_modeset_unlock_all(dev); 208 drm_modeset_unlock_all(dev);
209 return 0; 209 return 0;
@@ -219,7 +219,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
219{ 219{
220 struct drm_psb_private *dev_priv = dev->dev_private; 220 struct drm_psb_private *dev_priv = dev->dev_private;
221 struct drm_crtc *crtc; 221 struct drm_crtc *crtc;
222 struct drm_connector *connector; 222 struct gma_connector *connector;
223 struct psb_state *regs = &dev_priv->regs.psb; 223 struct psb_state *regs = &dev_priv->regs.psb;
224 224
225 /* Display arbitration + watermarks */ 225 /* Display arbitration + watermarks */
@@ -238,11 +238,11 @@ static int psb_restore_display_registers(struct drm_device *dev)
238 drm_modeset_lock_all(dev); 238 drm_modeset_lock_all(dev);
239 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 239 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
240 if (drm_helper_crtc_in_use(crtc)) 240 if (drm_helper_crtc_in_use(crtc))
241 crtc->funcs->restore(crtc); 241 dev_priv->ops->restore_crtc(crtc);
242 242
243 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 243 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
244 if (connector->funcs->restore) 244 if (connector->restore)
245 connector->funcs->restore(connector); 245 connector->restore(&connector->base);
246 246
247 drm_modeset_unlock_all(dev); 247 drm_modeset_unlock_all(dev);
248 return 0; 248 return 0;
@@ -354,6 +354,8 @@ const struct psb_ops psb_chip_ops = {
354 .init_pm = psb_init_pm, 354 .init_pm = psb_init_pm,
355 .save_regs = psb_save_display_registers, 355 .save_regs = psb_save_display_registers,
356 .restore_regs = psb_restore_display_registers, 356 .restore_regs = psb_restore_display_registers,
357 .save_crtc = gma_crtc_save,
358 .restore_crtc = gma_crtc_restore,
357 .power_down = psb_power_down, 359 .power_down = psb_power_down,
358 .power_up = psb_power_up, 360 .power_up = psb_power_up,
359}; 361};
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 3bd2c726dd61..b74372760d7f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -653,6 +653,8 @@ struct psb_ops {
653 void (*init_pm)(struct drm_device *dev); 653 void (*init_pm)(struct drm_device *dev);
654 int (*save_regs)(struct drm_device *dev); 654 int (*save_regs)(struct drm_device *dev);
655 int (*restore_regs)(struct drm_device *dev); 655 int (*restore_regs)(struct drm_device *dev);
656 void (*save_crtc)(struct drm_crtc *crtc);
657 void (*restore_crtc)(struct drm_crtc *crtc);
656 int (*power_up)(struct drm_device *dev); 658 int (*power_up)(struct drm_device *dev);
657 int (*power_down)(struct drm_device *dev); 659 int (*power_down)(struct drm_device *dev);
658 void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc); 660 void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6659da88fe5b..dcdbc37e55e1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -439,8 +439,6 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
439}; 439};
440 440
441const struct drm_crtc_funcs psb_intel_crtc_funcs = { 441const struct drm_crtc_funcs psb_intel_crtc_funcs = {
442 .save = gma_crtc_save,
443 .restore = gma_crtc_restore,
444 .cursor_set = gma_crtc_cursor_set, 442 .cursor_set = gma_crtc_cursor_set,
445 .cursor_move = gma_crtc_cursor_move, 443 .cursor_move = gma_crtc_cursor_move,
446 .gamma_set = gma_crtc_gamma_set, 444 .gamma_set = gma_crtc_gamma_set,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 860dd2177ca1..2a3b7c684db2 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -140,6 +140,9 @@ struct gma_encoder {
140struct gma_connector { 140struct gma_connector {
141 struct drm_connector base; 141 struct drm_connector base;
142 struct gma_encoder *encoder; 142 struct gma_encoder *encoder;
143
144 void (*save)(struct drm_connector *connector);
145 void (*restore)(struct drm_connector *connector);
143}; 146};
144 147
145struct psb_intel_crtc_state { 148struct psb_intel_crtc_state {
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ce0645d0c1e5..b1b93317d054 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -653,8 +653,6 @@ const struct drm_connector_helper_funcs
653 653
654const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { 654const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
655 .dpms = drm_helper_connector_dpms, 655 .dpms = drm_helper_connector_dpms,
656 .save = psb_intel_lvds_save,
657 .restore = psb_intel_lvds_restore,
658 .detect = psb_intel_lvds_detect, 656 .detect = psb_intel_lvds_detect,
659 .fill_modes = drm_helper_probe_single_connector_modes, 657 .fill_modes = drm_helper_probe_single_connector_modes,
660 .set_property = psb_intel_lvds_set_property, 658 .set_property = psb_intel_lvds_set_property,
@@ -715,6 +713,9 @@ void psb_intel_lvds_init(struct drm_device *dev,
715 gma_encoder->dev_priv = lvds_priv; 713 gma_encoder->dev_priv = lvds_priv;
716 714
717 connector = &gma_connector->base; 715 connector = &gma_connector->base;
716 gma_connector->save = psb_intel_lvds_save;
717 gma_connector->restore = psb_intel_lvds_restore;
718
718 encoder = &gma_encoder->base; 719 encoder = &gma_encoder->base;
719 drm_connector_init(dev, connector, 720 drm_connector_init(dev, connector,
720 &psb_intel_lvds_connector_funcs, 721 &psb_intel_lvds_connector_funcs,
@@ -722,7 +723,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
722 723
723 drm_encoder_init(dev, encoder, 724 drm_encoder_init(dev, encoder,
724 &psb_intel_lvds_enc_funcs, 725 &psb_intel_lvds_enc_funcs,
725 DRM_MODE_ENCODER_LVDS); 726 DRM_MODE_ENCODER_LVDS, NULL);
726 727
727 gma_connector_attach_encoder(gma_connector, gma_encoder); 728 gma_connector_attach_encoder(gma_connector, gma_encoder);
728 gma_encoder->type = INTEL_OUTPUT_LVDS; 729 gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 58529cea575d..e787d376ba67 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1837,8 +1837,6 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1837 1837
1838static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1838static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1839 .dpms = drm_helper_connector_dpms, 1839 .dpms = drm_helper_connector_dpms,
1840 .save = psb_intel_sdvo_save,
1841 .restore = psb_intel_sdvo_restore,
1842 .detect = psb_intel_sdvo_detect, 1840 .detect = psb_intel_sdvo_detect,
1843 .fill_modes = drm_helper_probe_single_connector_modes, 1841 .fill_modes = drm_helper_probe_single_connector_modes,
1844 .set_property = psb_intel_sdvo_set_property, 1842 .set_property = psb_intel_sdvo_set_property,
@@ -2021,6 +2019,9 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
2021 connector->base.base.doublescan_allowed = 0; 2019 connector->base.base.doublescan_allowed = 0;
2022 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2020 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2023 2021
2022 connector->base.save = psb_intel_sdvo_save;
2023 connector->base.restore = psb_intel_sdvo_restore;
2024
2024 gma_connector_attach_encoder(&connector->base, &encoder->base); 2025 gma_connector_attach_encoder(&connector->base, &encoder->base);
2025 drm_connector_register(&connector->base.base); 2026 drm_connector_register(&connector->base.base);
2026} 2027}
@@ -2525,7 +2526,8 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2525 /* encoder type will be decided later */ 2526 /* encoder type will be decided later */
2526 gma_encoder = &psb_intel_sdvo->base; 2527 gma_encoder = &psb_intel_sdvo->base;
2527 gma_encoder->type = INTEL_OUTPUT_SDVO; 2528 gma_encoder->type = INTEL_OUTPUT_SDVO;
2528 drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0); 2529 drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs,
2530 0, NULL);
2529 2531
2530 /* Read the regs to test if we can talk to the device */ 2532 /* Read the regs to test if we can talk to the device */
2531 for (i = 0; i < 0x40; i++) { 2533 for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..533d1e3d4a99 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -752,7 +752,7 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
752 adv7511->f_tmds = mode->clock; 752 adv7511->f_tmds = mode->clock;
753} 753}
754 754
755static struct drm_encoder_slave_funcs adv7511_encoder_funcs = { 755static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
756 .dpms = adv7511_encoder_dpms, 756 .dpms = adv7511_encoder_dpms,
757 .mode_valid = adv7511_encoder_mode_valid, 757 .mode_valid = adv7511_encoder_mode_valid,
758 .mode_set = adv7511_encoder_mode_set, 758 .mode_set = adv7511_encoder_mode_set,
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index d9a72c96e56c..90db5f4dcce5 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -371,7 +371,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
371 return 0; 371 return 0;
372} 372}
373 373
374static struct drm_encoder_slave_funcs ch7006_encoder_funcs = { 374static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
375 .set_config = ch7006_encoder_set_config, 375 .set_config = ch7006_encoder_set_config,
376 .destroy = ch7006_encoder_destroy, 376 .destroy = ch7006_encoder_destroy,
377 .dpms = ch7006_encoder_dpms, 377 .dpms = ch7006_encoder_dpms,
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 002ce7874332..c400428f6c8c 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -341,7 +341,7 @@ sil164_encoder_destroy(struct drm_encoder *encoder)
341 drm_i2c_encoder_destroy(encoder); 341 drm_i2c_encoder_destroy(encoder);
342} 342}
343 343
344static struct drm_encoder_slave_funcs sil164_encoder_funcs = { 344static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
345 .set_config = sil164_encoder_set_config, 345 .set_config = sil164_encoder_set_config,
346 .destroy = sil164_encoder_destroy, 346 .destroy = sil164_encoder_destroy,
347 .dpms = sil164_encoder_dpms, 347 .dpms = sil164_encoder_dpms,
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 896b6aaf8c4d..34e38749a817 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -22,6 +22,7 @@
22#include <sound/asoundef.h> 22#include <sound/asoundef.h>
23 23
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include <drm/drm_atomic_helper.h>
25#include <drm/drm_crtc_helper.h> 26#include <drm/drm_crtc_helper.h>
26#include <drm/drm_edid.h> 27#include <drm/drm_edid.h>
27#include <drm/drm_of.h> 28#include <drm/drm_of.h>
@@ -855,18 +856,6 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
855 priv->dpms = mode; 856 priv->dpms = mode;
856} 857}
857 858
858static void
859tda998x_encoder_save(struct drm_encoder *encoder)
860{
861 DBG("");
862}
863
864static void
865tda998x_encoder_restore(struct drm_encoder *encoder)
866{
867 DBG("");
868}
869
870static bool 859static bool
871tda998x_encoder_mode_fixup(struct drm_encoder *encoder, 860tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
872 const struct drm_display_mode *mode, 861 const struct drm_display_mode *mode,
@@ -878,7 +867,10 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
878static int tda998x_connector_mode_valid(struct drm_connector *connector, 867static int tda998x_connector_mode_valid(struct drm_connector *connector,
879 struct drm_display_mode *mode) 868 struct drm_display_mode *mode)
880{ 869{
881 if (mode->clock > 150000) 870 /* TDA19988 dotclock can go up to 165MHz */
871 struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
872
873 if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
882 return MODE_CLOCK_HIGH; 874 return MODE_CLOCK_HIGH;
883 if (mode->htotal >= BIT(13)) 875 if (mode->htotal >= BIT(13))
884 return MODE_BAD_HVALUE; 876 return MODE_BAD_HVALUE;
@@ -1351,8 +1343,6 @@ static void tda998x_encoder_commit(struct drm_encoder *encoder)
1351 1343
1352static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = { 1344static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
1353 .dpms = tda998x_encoder_dpms, 1345 .dpms = tda998x_encoder_dpms,
1354 .save = tda998x_encoder_save,
1355 .restore = tda998x_encoder_restore,
1356 .mode_fixup = tda998x_encoder_mode_fixup, 1346 .mode_fixup = tda998x_encoder_mode_fixup,
1357 .prepare = tda998x_encoder_prepare, 1347 .prepare = tda998x_encoder_prepare,
1358 .commit = tda998x_encoder_commit, 1348 .commit = tda998x_encoder_commit,
@@ -1393,10 +1383,13 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
1393} 1383}
1394 1384
1395static const struct drm_connector_funcs tda998x_connector_funcs = { 1385static const struct drm_connector_funcs tda998x_connector_funcs = {
1396 .dpms = drm_helper_connector_dpms, 1386 .dpms = drm_atomic_helper_connector_dpms,
1387 .reset = drm_atomic_helper_connector_reset,
1397 .fill_modes = drm_helper_probe_single_connector_modes, 1388 .fill_modes = drm_helper_probe_single_connector_modes,
1398 .detect = tda998x_connector_detect, 1389 .detect = tda998x_connector_detect,
1399 .destroy = tda998x_connector_destroy, 1390 .destroy = tda998x_connector_destroy,
1391 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1392 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1400}; 1393};
1401 1394
1402static int tda998x_bind(struct device *dev, struct device *master, void *data) 1395static int tda998x_bind(struct device *dev, struct device *master, void *data)
@@ -1437,7 +1430,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
1437 1430
1438 drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs); 1431 drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
1439 ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, 1432 ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
1440 DRM_MODE_ENCODER_TMDS); 1433 DRM_MODE_ENCODER_TMDS, NULL);
1441 if (ret) 1434 if (ret)
1442 goto err_encoder; 1435 goto err_encoder;
1443 1436
@@ -1453,7 +1446,6 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
1453 if (ret) 1446 if (ret)
1454 goto err_sysfs; 1447 goto err_sysfs;
1455 1448
1456 priv->connector.encoder = &priv->encoder;
1457 drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder); 1449 drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
1458 1450
1459 return 0; 1451 return 0;
@@ -1472,6 +1464,7 @@ static void tda998x_unbind(struct device *dev, struct device *master,
1472{ 1464{
1473 struct tda998x_priv *priv = dev_get_drvdata(dev); 1465 struct tda998x_priv *priv = dev_get_drvdata(dev);
1474 1466
1467 drm_connector_unregister(&priv->connector);
1475 drm_connector_cleanup(&priv->connector); 1468 drm_connector_cleanup(&priv->connector);
1476 drm_encoder_cleanup(&priv->encoder); 1469 drm_encoder_cleanup(&priv->encoder);
1477 tda998x_destroy(priv); 1470 tda998x_destroy(priv);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 44a896ce32e6..a0f5659032fc 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -406,6 +406,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
406 if (ret) 406 if (ret)
407 goto cleanup_gem_stolen; 407 goto cleanup_gem_stolen;
408 408
409 intel_setup_gmbus(dev);
410
409 /* Important: The output setup functions called by modeset_init need 411 /* Important: The output setup functions called by modeset_init need
410 * working irqs for e.g. gmbus and dp aux transfers. */ 412 * working irqs for e.g. gmbus and dp aux transfers. */
411 intel_modeset_init(dev); 413 intel_modeset_init(dev);
@@ -455,6 +457,7 @@ cleanup_gem:
455cleanup_irq: 457cleanup_irq:
456 intel_guc_ucode_fini(dev); 458 intel_guc_ucode_fini(dev);
457 drm_irq_uninstall(dev); 459 drm_irq_uninstall(dev);
460 intel_teardown_gmbus(dev);
458cleanup_gem_stolen: 461cleanup_gem_stolen:
459 i915_gem_cleanup_stolen(dev); 462 i915_gem_cleanup_stolen(dev);
460cleanup_vga_switcheroo: 463cleanup_vga_switcheroo:
@@ -1029,7 +1032,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1029 1032
1030 /* Try to make sure MCHBAR is enabled before poking at it */ 1033 /* Try to make sure MCHBAR is enabled before poking at it */
1031 intel_setup_mchbar(dev); 1034 intel_setup_mchbar(dev);
1032 intel_setup_gmbus(dev);
1033 intel_opregion_setup(dev); 1035 intel_opregion_setup(dev);
1034 1036
1035 i915_gem_load(dev); 1037 i915_gem_load(dev);
@@ -1102,7 +1104,6 @@ out_gem_unload:
1102 if (dev->pdev->msi_enabled) 1104 if (dev->pdev->msi_enabled)
1103 pci_disable_msi(dev->pdev); 1105 pci_disable_msi(dev->pdev);
1104 1106
1105 intel_teardown_gmbus(dev);
1106 intel_teardown_mchbar(dev); 1107 intel_teardown_mchbar(dev);
1107 pm_qos_remove_request(&dev_priv->pm_qos); 1108 pm_qos_remove_request(&dev_priv->pm_qos);
1108 destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); 1109 destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
@@ -1204,7 +1205,6 @@ int i915_driver_unload(struct drm_device *dev)
1204 1205
1205 intel_csr_ucode_fini(dev_priv); 1206 intel_csr_ucode_fini(dev_priv);
1206 1207
1207 intel_teardown_gmbus(dev);
1208 intel_teardown_mchbar(dev); 1208 intel_teardown_mchbar(dev);
1209 1209
1210 destroy_workqueue(dev_priv->hotplug.dp_wq); 1210 destroy_workqueue(dev_priv->hotplug.dp_wq);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2c24ff394b2a..ddc21d4b388d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2994,6 +2994,10 @@ i915_gem_idle_work_handler(struct work_struct *work)
2994 if (!list_empty(&ring->request_list)) 2994 if (!list_empty(&ring->request_list))
2995 return; 2995 return;
2996 2996
2997 /* we probably should sync with hangcheck here, using cancel_work_sync.
2998 * Also locking seems to be fubar here, ring->request_list is protected
2999 * by dev->struct_mutex. */
3000
2997 intel_mark_idle(dev); 3001 intel_mark_idle(dev);
2998 3002
2999 if (mutex_trylock(&dev->struct_mutex)) { 3003 if (mutex_trylock(&dev->struct_mutex)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 556a458d669e..0a988895165f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -855,31 +855,31 @@ enum skl_disp_power_wells {
855 * 855 *
856 * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is 856 * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
857 * digital port D (CHV) or port A (BXT). 857 * digital port D (CHV) or port A (BXT).
858 */
859/*
860 * Dual channel PHY (VLV/CHV/BXT)
861 * ---------------------------------
862 * | CH0 | CH1 |
863 * | CMN/PLL/REF | CMN/PLL/REF |
864 * |---------------|---------------| Display PHY
865 * | PCS01 | PCS23 | PCS01 | PCS23 |
866 * |-------|-------|-------|-------|
867 * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
868 * ---------------------------------
869 * | DDI0 | DDI1 | DP/HDMI ports
870 * ---------------------------------
871 * 858 *
872 * Single channel PHY (CHV/BXT) 859 *
873 * ----------------- 860 * Dual channel PHY (VLV/CHV/BXT)
874 * | CH0 | 861 * ---------------------------------
875 * | CMN/PLL/REF | 862 * | CH0 | CH1 |
876 * |---------------| Display PHY 863 * | CMN/PLL/REF | CMN/PLL/REF |
877 * | PCS01 | PCS23 | 864 * |---------------|---------------| Display PHY
878 * |-------|-------| 865 * | PCS01 | PCS23 | PCS01 | PCS23 |
879 * |TX0|TX1|TX2|TX3| 866 * |-------|-------|-------|-------|
880 * ----------------- 867 * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
881 * | DDI2 | DP/HDMI port 868 * ---------------------------------
882 * ----------------- 869 * | DDI0 | DDI1 | DP/HDMI ports
870 * ---------------------------------
871 *
872 * Single channel PHY (CHV/BXT)
873 * -----------------
874 * | CH0 |
875 * | CMN/PLL/REF |
876 * |---------------| Display PHY
877 * | PCS01 | PCS23 |
878 * |-------|-------|
879 * |TX0|TX1|TX2|TX3|
880 * -----------------
881 * | DDI2 | DP/HDMI port
882 * -----------------
883 */ 883 */
884#define DPIO_DEVFN 0 884#define DPIO_DEVFN 0
885 885
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index cef359958c73..9c89df1af036 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -824,7 +824,7 @@ void intel_crt_init(struct drm_device *dev)
824 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 824 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
825 825
826 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, 826 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
827 DRM_MODE_ENCODER_DAC); 827 DRM_MODE_ENCODER_DAC, NULL);
828 828
829 intel_connector_attach_encoder(intel_connector, &crt->base); 829 intel_connector_attach_encoder(intel_connector, &crt->base);
830 830
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 25d9e5ccdebd..1f9a3687b540 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3266,7 +3266,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
3266 encoder = &intel_encoder->base; 3266 encoder = &intel_encoder->base;
3267 3267
3268 drm_encoder_init(dev, encoder, &intel_ddi_funcs, 3268 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
3269 DRM_MODE_ENCODER_TMDS); 3269 DRM_MODE_ENCODER_TMDS, NULL);
3270 3270
3271 intel_encoder->compute_config = intel_ddi_compute_config; 3271 intel_encoder->compute_config = intel_ddi_compute_config;
3272 intel_encoder->enable = intel_enable_ddi; 3272 intel_encoder->enable = intel_enable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c532c3d605ac..ccb3e3f47450 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6534,13 +6534,11 @@ static void intel_connector_check_state(struct intel_connector *connector)
6534 6534
6535int intel_connector_init(struct intel_connector *connector) 6535int intel_connector_init(struct intel_connector *connector)
6536{ 6536{
6537 struct drm_connector_state *connector_state; 6537 drm_atomic_helper_connector_reset(&connector->base);
6538 6538
6539 connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL); 6539 if (!connector->base.state)
6540 if (!connector_state)
6541 return -ENOMEM; 6540 return -ENOMEM;
6542 6541
6543 connector->base.state = connector_state;
6544 return 0; 6542 return 0;
6545} 6543}
6546 6544
@@ -14152,7 +14150,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14152 drm_universal_plane_init(dev, &primary->base, 0, 14150 drm_universal_plane_init(dev, &primary->base, 0,
14153 &intel_plane_funcs, 14151 &intel_plane_funcs,
14154 intel_primary_formats, num_formats, 14152 intel_primary_formats, num_formats,
14155 DRM_PLANE_TYPE_PRIMARY); 14153 DRM_PLANE_TYPE_PRIMARY, NULL);
14156 14154
14157 if (INTEL_INFO(dev)->gen >= 4) 14155 if (INTEL_INFO(dev)->gen >= 4)
14158 intel_create_rotation_property(dev, primary); 14156 intel_create_rotation_property(dev, primary);
@@ -14303,7 +14301,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14303 &intel_plane_funcs, 14301 &intel_plane_funcs,
14304 intel_cursor_formats, 14302 intel_cursor_formats,
14305 ARRAY_SIZE(intel_cursor_formats), 14303 ARRAY_SIZE(intel_cursor_formats),
14306 DRM_PLANE_TYPE_CURSOR); 14304 DRM_PLANE_TYPE_CURSOR, NULL);
14307 14305
14308 if (INTEL_INFO(dev)->gen >= 4) { 14306 if (INTEL_INFO(dev)->gen >= 4) {
14309 if (!dev->mode_config.rotation_property) 14307 if (!dev->mode_config.rotation_property)
@@ -14380,7 +14378,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
14380 goto fail; 14378 goto fail;
14381 14379
14382 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 14380 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14383 cursor, &intel_crtc_funcs); 14381 cursor, &intel_crtc_funcs, NULL);
14384 if (ret) 14382 if (ret)
14385 goto fail; 14383 goto fail;
14386 14384
@@ -15659,6 +15657,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15659 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0); 15657 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
15660 crtc->base.state->active = crtc->active; 15658 crtc->base.state->active = crtc->active;
15661 crtc->base.enabled = crtc->active; 15659 crtc->base.enabled = crtc->active;
15660 crtc->base.state->connector_mask = 0;
15662 15661
15663 /* Because we only establish the connector -> encoder -> 15662 /* Because we only establish the connector -> encoder ->
15664 * crtc links if something is active, this means the 15663 * crtc links if something is active, this means the
@@ -15885,7 +15884,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15885 for_each_intel_connector(dev, connector) { 15884 for_each_intel_connector(dev, connector) {
15886 if (connector->get_hw_state(connector)) { 15885 if (connector->get_hw_state(connector)) {
15887 connector->base.dpms = DRM_MODE_DPMS_ON; 15886 connector->base.dpms = DRM_MODE_DPMS_ON;
15888 connector->base.encoder = &connector->encoder->base; 15887
15888 encoder = connector->encoder;
15889 connector->base.encoder = &encoder->base;
15890
15891 if (encoder->base.crtc &&
15892 encoder->base.crtc->state->active) {
15893 /*
15894 * This has to be done during hardware readout
15895 * because anything calling .crtc_disable may
15896 * rely on the connector_mask being accurate.
15897 */
15898 encoder->base.crtc->state->connector_mask |=
15899 1 << drm_connector_index(&connector->base);
15900 }
15901
15889 } else { 15902 } else {
15890 connector->base.dpms = DRM_MODE_DPMS_OFF; 15903 connector->base.dpms = DRM_MODE_DPMS_OFF;
15891 connector->base.encoder = NULL; 15904 connector->base.encoder = NULL;
@@ -16130,6 +16143,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
16130 mutex_lock(&dev->struct_mutex); 16143 mutex_lock(&dev->struct_mutex);
16131 intel_cleanup_gt_powersave(dev); 16144 intel_cleanup_gt_powersave(dev);
16132 mutex_unlock(&dev->struct_mutex); 16145 mutex_unlock(&dev->struct_mutex);
16146
16147 intel_teardown_gmbus(dev);
16133} 16148}
16134 16149
16135/* 16150/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 3999afa440bd..17612548c58d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -6003,7 +6003,7 @@ intel_dp_init(struct drm_device *dev,
6003 encoder = &intel_encoder->base; 6003 encoder = &intel_encoder->base;
6004 6004
6005 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 6005 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6006 DRM_MODE_ENCODER_TMDS)) 6006 DRM_MODE_ENCODER_TMDS, NULL))
6007 goto err_encoder_init; 6007 goto err_encoder_init;
6008 6008
6009 intel_encoder->compute_config = intel_dp_compute_config; 6009 intel_encoder->compute_config = intel_dp_compute_config;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6f4762dc5a94..2a2ab306ad84 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -536,7 +536,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
536 drm_kms_helper_hotplug_event(dev); 536 drm_kms_helper_hotplug_event(dev);
537} 537}
538 538
539static struct drm_dp_mst_topology_cbs mst_cbs = { 539static const struct drm_dp_mst_topology_cbs mst_cbs = {
540 .add_connector = intel_dp_add_mst_connector, 540 .add_connector = intel_dp_add_mst_connector,
541 .register_connector = intel_dp_register_mst_connector, 541 .register_connector = intel_dp_register_mst_connector,
542 .destroy_connector = intel_dp_destroy_mst_connector, 542 .destroy_connector = intel_dp_destroy_mst_connector,
@@ -560,7 +560,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
560 intel_mst->primary = intel_dig_port; 560 intel_mst->primary = intel_dig_port;
561 561
562 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, 562 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
563 DRM_MODE_ENCODER_DPMST); 563 DRM_MODE_ENCODER_DPMST, NULL);
564 564
565 intel_encoder->type = INTEL_OUTPUT_DP_MST; 565 intel_encoder->type = INTEL_OUTPUT_DP_MST;
566 intel_encoder->crtc_mask = 0x7; 566 intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 328cd58cb5fe..91cef3525c93 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -1138,7 +1138,8 @@ void intel_dsi_init(struct drm_device *dev)
1138 1138
1139 connector = &intel_connector->base; 1139 connector = &intel_connector->base;
1140 1140
1141 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); 1141 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
1142 NULL);
1142 1143
1143 intel_encoder->compute_config = intel_dsi_compute_config; 1144 intel_encoder->compute_config = intel_dsi_compute_config;
1144 intel_encoder->pre_enable = intel_dsi_pre_enable; 1145 intel_encoder->pre_enable = intel_dsi_pre_enable;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7161deb2aed8..286baec979c8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -429,7 +429,7 @@ void intel_dvo_init(struct drm_device *dev)
429 429
430 intel_encoder = &intel_dvo->base; 430 intel_encoder = &intel_dvo->base;
431 drm_encoder_init(dev, &intel_encoder->base, 431 drm_encoder_init(dev, &intel_encoder->base,
432 &intel_dvo_enc_funcs, encoder_type); 432 &intel_dvo_enc_funcs, encoder_type, NULL);
433 433
434 intel_encoder->disable = intel_disable_dvo; 434 intel_encoder->disable = intel_disable_dvo;
435 intel_encoder->enable = intel_enable_dvo; 435 intel_encoder->enable = intel_enable_dvo;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d4ed7aa9927e..8698a643d027 100755
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2169,7 +2169,7 @@ void intel_hdmi_init(struct drm_device *dev,
2169 intel_encoder = &intel_dig_port->base; 2169 intel_encoder = &intel_dig_port->base;
2170 2170
2171 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 2171 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
2172 DRM_MODE_ENCODER_TMDS); 2172 DRM_MODE_ENCODER_TMDS, NULL);
2173 2173
2174 intel_encoder->compute_config = intel_hdmi_compute_config; 2174 intel_encoder->compute_config = intel_hdmi_compute_config;
2175 if (HAS_PCH_SPLIT(dev)) { 2175 if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 61f1145f6579..0da0240caf81 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -1025,7 +1025,7 @@ void intel_lvds_init(struct drm_device *dev)
1025 DRM_MODE_CONNECTOR_LVDS); 1025 DRM_MODE_CONNECTOR_LVDS);
1026 1026
1027 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, 1027 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
1028 DRM_MODE_ENCODER_LVDS); 1028 DRM_MODE_ENCODER_LVDS, NULL);
1029 1029
1030 intel_encoder->enable = intel_enable_lvds; 1030 intel_encoder->enable = intel_enable_lvds;
1031 intel_encoder->pre_enable = intel_pre_enable_lvds; 1031 intel_encoder->pre_enable = intel_pre_enable_lvds;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 06679f164b3e..2e1da060b0e1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2978,7 +2978,8 @@ bool intel_sdvo_init(struct drm_device *dev,
2978 /* encoder type will be decided later */ 2978 /* encoder type will be decided later */
2979 intel_encoder = &intel_sdvo->base; 2979 intel_encoder = &intel_sdvo->base;
2980 intel_encoder->type = INTEL_OUTPUT_SDVO; 2980 intel_encoder->type = INTEL_OUTPUT_SDVO;
2981 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); 2981 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
2982 NULL);
2982 2983
2983 /* Read the regs to test if we can talk to the device */ 2984 /* Read the regs to test if we can talk to the device */
2984 for (i = 0; i < 0x40; i++) { 2985 for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 22589fce9a50..0875c8e0ec0a 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1116,7 +1116,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1116 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, 1116 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1117 &intel_plane_funcs, 1117 &intel_plane_funcs,
1118 plane_formats, num_plane_formats, 1118 plane_formats, num_plane_formats,
1119 DRM_PLANE_TYPE_OVERLAY); 1119 DRM_PLANE_TYPE_OVERLAY, NULL);
1120 if (ret) { 1120 if (ret) {
1121 kfree(intel_plane); 1121 kfree(intel_plane);
1122 goto out; 1122 goto out;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6bea78944cd6..948cbff6c62e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1645,7 +1645,7 @@ intel_tv_init(struct drm_device *dev)
1645 DRM_MODE_CONNECTOR_SVIDEO); 1645 DRM_MODE_CONNECTOR_SVIDEO);
1646 1646
1647 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1647 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1648 DRM_MODE_ENCODER_TVDAC); 1648 DRM_MODE_ENCODER_TVDAC, NULL);
1649 1649
1650 intel_encoder->compute_config = intel_tv_compute_config; 1650 intel_encoder->compute_config = intel_tv_compute_config;
1651 intel_encoder->get_config = intel_tv_get_config; 1651 intel_encoder->get_config = intel_tv_get_config;
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 98605ea2ad9d..063825fecbe2 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -137,7 +137,7 @@ static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
137 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24); 137 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
138} 138}
139 139
140static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { 140static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
141 .mode_fixup = dw_hdmi_imx_encoder_mode_fixup, 141 .mode_fixup = dw_hdmi_imx_encoder_mode_fixup,
142 .mode_set = dw_hdmi_imx_encoder_mode_set, 142 .mode_set = dw_hdmi_imx_encoder_mode_set,
143 .prepare = dw_hdmi_imx_encoder_prepare, 143 .prepare = dw_hdmi_imx_encoder_prepare,
@@ -145,7 +145,7 @@ static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
145 .disable = dw_hdmi_imx_encoder_disable, 145 .disable = dw_hdmi_imx_encoder_disable,
146}; 146};
147 147
148static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { 148static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
149 .destroy = drm_encoder_cleanup, 149 .destroy = drm_encoder_cleanup,
150}; 150};
151 151
@@ -251,7 +251,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
251 251
252 drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs); 252 drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
253 drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, 253 drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
254 DRM_MODE_ENCODER_TMDS); 254 DRM_MODE_ENCODER_TMDS, NULL);
255 255
256 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); 256 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
257} 257}
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 882cf3d4b7a8..2f57d7967417 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -39,13 +39,12 @@ struct imx_drm_component {
39struct imx_drm_device { 39struct imx_drm_device {
40 struct drm_device *drm; 40 struct drm_device *drm;
41 struct imx_drm_crtc *crtc[MAX_CRTC]; 41 struct imx_drm_crtc *crtc[MAX_CRTC];
42 int pipes; 42 unsigned int pipes;
43 struct drm_fbdev_cma *fbhelper; 43 struct drm_fbdev_cma *fbhelper;
44}; 44};
45 45
46struct imx_drm_crtc { 46struct imx_drm_crtc {
47 struct drm_crtc *crtc; 47 struct drm_crtc *crtc;
48 int pipe;
49 struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs; 48 struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs;
50}; 49};
51 50
@@ -54,9 +53,9 @@ static int legacyfb_depth = 16;
54module_param(legacyfb_depth, int, 0444); 53module_param(legacyfb_depth, int, 0444);
55#endif 54#endif
56 55
57int imx_drm_crtc_id(struct imx_drm_crtc *crtc) 56unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
58{ 57{
59 return crtc->pipe; 58 return drm_crtc_index(crtc->crtc);
60} 59}
61EXPORT_SYMBOL_GPL(imx_drm_crtc_id); 60EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
62 61
@@ -124,19 +123,19 @@ EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
124 123
125int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) 124int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
126{ 125{
127 return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); 126 return drm_crtc_vblank_get(imx_drm_crtc->crtc);
128} 127}
129EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); 128EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
130 129
131void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) 130void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
132{ 131{
133 drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); 132 drm_crtc_vblank_put(imx_drm_crtc->crtc);
134} 133}
135EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); 134EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
136 135
137void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) 136void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
138{ 137{
139 drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); 138 drm_crtc_handle_vblank(imx_drm_crtc->crtc);
140} 139}
141EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); 140EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
142 141
@@ -215,7 +214,7 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
215 drm_fbdev_cma_hotplug_event(imxdrm->fbhelper); 214 drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
216} 215}
217 216
218static struct drm_mode_config_funcs imx_drm_mode_config_funcs = { 217static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
219 .fb_create = drm_fb_cma_create, 218 .fb_create = drm_fb_cma_create,
220 .output_poll_changed = imx_drm_output_poll_changed, 219 .output_poll_changed = imx_drm_output_poll_changed,
221}; 220};
@@ -306,6 +305,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
306 dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n"); 305 dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
307 legacyfb_depth = 16; 306 legacyfb_depth = 16;
308 } 307 }
308 drm_helper_disable_unused_functions(drm);
309 imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, 309 imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
310 drm->mode_config.num_crtc, MAX_CRTC); 310 drm->mode_config.num_crtc, MAX_CRTC);
311 if (IS_ERR(imxdrm->fbhelper)) { 311 if (IS_ERR(imxdrm->fbhelper)) {
@@ -356,12 +356,11 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
356 return -ENOMEM; 356 return -ENOMEM;
357 357
358 imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs; 358 imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs;
359 imx_drm_crtc->pipe = imxdrm->pipes++;
360 imx_drm_crtc->crtc = crtc; 359 imx_drm_crtc->crtc = crtc;
361 360
362 crtc->port = port; 361 crtc->port = port;
363 362
364 imxdrm->crtc[imx_drm_crtc->pipe] = imx_drm_crtc; 363 imxdrm->crtc[imxdrm->pipes++] = imx_drm_crtc;
365 364
366 *new_crtc = imx_drm_crtc; 365 *new_crtc = imx_drm_crtc;
367 366
@@ -373,12 +372,12 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
373 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); 372 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
374 373
375 drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, 374 drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
376 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); 375 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
377 376
378 return 0; 377 return 0;
379 378
380err_register: 379err_register:
381 imxdrm->crtc[imx_drm_crtc->pipe] = NULL; 380 imxdrm->crtc[--imxdrm->pipes] = NULL;
382 kfree(imx_drm_crtc); 381 kfree(imx_drm_crtc);
383 return ret; 382 return ret;
384} 383}
@@ -390,10 +389,11 @@ EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
390int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc) 389int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
391{ 390{
392 struct imx_drm_device *imxdrm = imx_drm_crtc->crtc->dev->dev_private; 391 struct imx_drm_device *imxdrm = imx_drm_crtc->crtc->dev->dev_private;
392 unsigned int pipe = drm_crtc_index(imx_drm_crtc->crtc);
393 393
394 drm_crtc_cleanup(imx_drm_crtc->crtc); 394 drm_crtc_cleanup(imx_drm_crtc->crtc);
395 395
396 imxdrm->crtc[imx_drm_crtc->pipe] = NULL; 396 imxdrm->crtc[pipe] = NULL;
397 397
398 kfree(imx_drm_crtc); 398 kfree(imx_drm_crtc);
399 399
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 83284b4d4be1..71cf6d9c714f 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -13,7 +13,7 @@ struct drm_plane;
13struct imx_drm_crtc; 13struct imx_drm_crtc;
14struct platform_device; 14struct platform_device;
15 15
16int imx_drm_crtc_id(struct imx_drm_crtc *crtc); 16unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
17 17
18struct imx_drm_crtc_helper_funcs { 18struct imx_drm_crtc_helper_funcs {
19 int (*enable_vblank)(struct drm_crtc *crtc); 19 int (*enable_vblank)(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index abacc8f67469..22ac482231ed 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -358,23 +358,23 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
358 drm_panel_unprepare(imx_ldb_ch->panel); 358 drm_panel_unprepare(imx_ldb_ch->panel);
359} 359}
360 360
361static struct drm_connector_funcs imx_ldb_connector_funcs = { 361static const struct drm_connector_funcs imx_ldb_connector_funcs = {
362 .dpms = drm_helper_connector_dpms, 362 .dpms = drm_helper_connector_dpms,
363 .fill_modes = drm_helper_probe_single_connector_modes, 363 .fill_modes = drm_helper_probe_single_connector_modes,
364 .detect = imx_ldb_connector_detect, 364 .detect = imx_ldb_connector_detect,
365 .destroy = imx_drm_connector_destroy, 365 .destroy = imx_drm_connector_destroy,
366}; 366};
367 367
368static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { 368static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
369 .get_modes = imx_ldb_connector_get_modes, 369 .get_modes = imx_ldb_connector_get_modes,
370 .best_encoder = imx_ldb_connector_best_encoder, 370 .best_encoder = imx_ldb_connector_best_encoder,
371}; 371};
372 372
373static struct drm_encoder_funcs imx_ldb_encoder_funcs = { 373static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
374 .destroy = imx_drm_encoder_destroy, 374 .destroy = imx_drm_encoder_destroy,
375}; 375};
376 376
377static struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { 377static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
378 .dpms = imx_ldb_encoder_dpms, 378 .dpms = imx_ldb_encoder_dpms,
379 .mode_fixup = imx_ldb_encoder_mode_fixup, 379 .mode_fixup = imx_ldb_encoder_mode_fixup,
380 .prepare = imx_ldb_encoder_prepare, 380 .prepare = imx_ldb_encoder_prepare,
@@ -422,7 +422,7 @@ static int imx_ldb_register(struct drm_device *drm,
422 drm_encoder_helper_add(&imx_ldb_ch->encoder, 422 drm_encoder_helper_add(&imx_ldb_ch->encoder,
423 &imx_ldb_encoder_helper_funcs); 423 &imx_ldb_encoder_helper_funcs);
424 drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs, 424 drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
425 DRM_MODE_ENCODER_LVDS); 425 DRM_MODE_ENCODER_LVDS, NULL);
426 426
427 drm_connector_helper_add(&imx_ldb_ch->connector, 427 drm_connector_helper_add(&imx_ldb_ch->connector,
428 &imx_ldb_connector_helper_funcs); 428 &imx_ldb_connector_helper_funcs);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index f9597146dc67..292349f0b132 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -360,24 +360,24 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
360 tve_disable(tve); 360 tve_disable(tve);
361} 361}
362 362
363static struct drm_connector_funcs imx_tve_connector_funcs = { 363static const struct drm_connector_funcs imx_tve_connector_funcs = {
364 .dpms = drm_helper_connector_dpms, 364 .dpms = drm_helper_connector_dpms,
365 .fill_modes = drm_helper_probe_single_connector_modes, 365 .fill_modes = drm_helper_probe_single_connector_modes,
366 .detect = imx_tve_connector_detect, 366 .detect = imx_tve_connector_detect,
367 .destroy = imx_drm_connector_destroy, 367 .destroy = imx_drm_connector_destroy,
368}; 368};
369 369
370static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = { 370static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
371 .get_modes = imx_tve_connector_get_modes, 371 .get_modes = imx_tve_connector_get_modes,
372 .best_encoder = imx_tve_connector_best_encoder, 372 .best_encoder = imx_tve_connector_best_encoder,
373 .mode_valid = imx_tve_connector_mode_valid, 373 .mode_valid = imx_tve_connector_mode_valid,
374}; 374};
375 375
376static struct drm_encoder_funcs imx_tve_encoder_funcs = { 376static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
377 .destroy = imx_drm_encoder_destroy, 377 .destroy = imx_drm_encoder_destroy,
378}; 378};
379 379
380static struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { 380static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
381 .dpms = imx_tve_encoder_dpms, 381 .dpms = imx_tve_encoder_dpms,
382 .mode_fixup = imx_tve_encoder_mode_fixup, 382 .mode_fixup = imx_tve_encoder_mode_fixup,
383 .prepare = imx_tve_encoder_prepare, 383 .prepare = imx_tve_encoder_prepare,
@@ -508,7 +508,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
508 508
509 drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs); 509 drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
510 drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, 510 drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
511 encoder_type); 511 encoder_type, NULL);
512 512
513 drm_connector_helper_add(&tve->connector, 513 drm_connector_helper_add(&tve->connector,
514 &imx_tve_connector_helper_funcs); 514 &imx_tve_connector_helper_funcs);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4ab841eebee1..30a57185bdb4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -270,7 +270,7 @@ static void ipu_crtc_commit(struct drm_crtc *crtc)
270 ipu_fb_enable(ipu_crtc); 270 ipu_fb_enable(ipu_crtc);
271} 271}
272 272
273static struct drm_crtc_helper_funcs ipu_helper_funcs = { 273static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
274 .dpms = ipu_crtc_dpms, 274 .dpms = ipu_crtc_dpms,
275 .mode_fixup = ipu_crtc_mode_fixup, 275 .mode_fixup = ipu_crtc_mode_fixup,
276 .mode_set = ipu_crtc_mode_set, 276 .mode_set = ipu_crtc_mode_set,
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index e2ff410bab74..591ba2f1ae03 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -401,7 +401,8 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
401 401
402 ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, 402 ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
403 &ipu_plane_funcs, ipu_plane_formats, 403 &ipu_plane_funcs, ipu_plane_formats,
404 ARRAY_SIZE(ipu_plane_formats), type); 404 ARRAY_SIZE(ipu_plane_formats), type,
405 NULL);
405 if (ret) { 406 if (ret) {
406 DRM_ERROR("failed to initialize plane\n"); 407 DRM_ERROR("failed to initialize plane\n");
407 kfree(ipu_plane); 408 kfree(ipu_plane);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2e9b9f1b5cd2..0ffef172afb4 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -148,23 +148,23 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
148 drm_panel_unprepare(imxpd->panel); 148 drm_panel_unprepare(imxpd->panel);
149} 149}
150 150
151static struct drm_connector_funcs imx_pd_connector_funcs = { 151static const struct drm_connector_funcs imx_pd_connector_funcs = {
152 .dpms = drm_helper_connector_dpms, 152 .dpms = drm_helper_connector_dpms,
153 .fill_modes = drm_helper_probe_single_connector_modes, 153 .fill_modes = drm_helper_probe_single_connector_modes,
154 .detect = imx_pd_connector_detect, 154 .detect = imx_pd_connector_detect,
155 .destroy = imx_drm_connector_destroy, 155 .destroy = imx_drm_connector_destroy,
156}; 156};
157 157
158static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { 158static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
159 .get_modes = imx_pd_connector_get_modes, 159 .get_modes = imx_pd_connector_get_modes,
160 .best_encoder = imx_pd_connector_best_encoder, 160 .best_encoder = imx_pd_connector_best_encoder,
161}; 161};
162 162
163static struct drm_encoder_funcs imx_pd_encoder_funcs = { 163static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
164 .destroy = imx_drm_encoder_destroy, 164 .destroy = imx_drm_encoder_destroy,
165}; 165};
166 166
167static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = { 167static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
168 .dpms = imx_pd_encoder_dpms, 168 .dpms = imx_pd_encoder_dpms,
169 .mode_fixup = imx_pd_encoder_mode_fixup, 169 .mode_fixup = imx_pd_encoder_mode_fixup,
170 .prepare = imx_pd_encoder_prepare, 170 .prepare = imx_pd_encoder_prepare,
@@ -192,7 +192,7 @@ static int imx_pd_register(struct drm_device *drm,
192 192
193 drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs); 193 drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
194 drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs, 194 drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
195 DRM_MODE_ENCODER_NONE); 195 DRM_MODE_ENCODER_NONE, NULL);
196 196
197 drm_connector_helper_add(&imxpd->connector, 197 drm_connector_helper_add(&imxpd->connector,
198 &imx_pd_connector_helper_funcs); 198 &imx_pd_connector_helper_funcs);
@@ -204,8 +204,6 @@ static int imx_pd_register(struct drm_device *drm,
204 204
205 drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder); 205 drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
206 206
207 imxpd->connector.encoder = &imxpd->encoder;
208
209 return 0; 207 return 0;
210} 208}
211 209
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index c99d3fe12881..19c18b7af28a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1538,7 +1538,7 @@ static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
1538 encoder->possible_crtcs = 0x1; 1538 encoder->possible_crtcs = 0x1;
1539 1539
1540 drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs, 1540 drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
1541 DRM_MODE_ENCODER_DAC); 1541 DRM_MODE_ENCODER_DAC, NULL);
1542 drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs); 1542 drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
1543 1543
1544 return encoder; 1544 return encoder;
@@ -1684,13 +1684,13 @@ static void mga_connector_destroy(struct drm_connector *connector)
1684 kfree(connector); 1684 kfree(connector);
1685} 1685}
1686 1686
1687struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = { 1687static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
1688 .get_modes = mga_vga_get_modes, 1688 .get_modes = mga_vga_get_modes,
1689 .mode_valid = mga_vga_mode_valid, 1689 .mode_valid = mga_vga_mode_valid,
1690 .best_encoder = mga_connector_best_encoder, 1690 .best_encoder = mga_connector_best_encoder,
1691}; 1691};
1692 1692
1693struct drm_connector_funcs mga_vga_connector_funcs = { 1693static const struct drm_connector_funcs mga_vga_connector_funcs = {
1694 .dpms = drm_helper_connector_dpms, 1694 .dpms = drm_helper_connector_dpms,
1695 .detect = mga_vga_detect, 1695 .detect = mga_vga_detect,
1696 .fill_modes = drm_helper_probe_single_connector_modes, 1696 .fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 84d3ec98e6b9..215495c2780c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -54,3 +54,11 @@ config DRM_MSM_DSI_20NM_PHY
54 default y 54 default y
55 help 55 help
56 Choose this option if the 20nm DSI PHY is used on the platform. 56 Choose this option if the 20nm DSI PHY is used on the platform.
57
58config DRM_MSM_DSI_28NM_8960_PHY
59 bool "Enable DSI 28nm 8960 PHY driver in MSM DRM"
60 depends on DRM_MSM_DSI
61 default y
62 help
63 Choose this option if the 28nm DSI PHY 8960 variant is used on the
64 platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1c90290be716..065ad4138799 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -54,6 +54,7 @@ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
54msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o 54msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
55 55
56msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ 56msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
57 mdp/mdp4/mdp4_dsi_encoder.o \
57 dsi/dsi_cfg.o \ 58 dsi/dsi_cfg.o \
58 dsi/dsi_host.o \ 59 dsi/dsi_host.o \
59 dsi/dsi_manager.o \ 60 dsi/dsi_manager.o \
@@ -62,10 +63,12 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
62 63
63msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o 64msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
64msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o 65msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
66msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
65 67
66ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) 68ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
67msm-y += dsi/pll/dsi_pll.o 69msm-y += dsi/pll/dsi_pll.o
68msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o 70msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
71msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
69endif 72endif
70 73
71obj-$(CONFIG_DRM_MSM) += msm.o 74obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 1ea2df524fac..950d27d26b30 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -19,10 +19,6 @@
19 19
20#include "adreno_gpu.h" 20#include "adreno_gpu.h"
21 21
22#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
23# include <mach/kgsl.h>
24#endif
25
26#define ANY_ID 0xff 22#define ANY_ID 0xff
27 23
28bool hang_debug = false; 24bool hang_debug = false;
@@ -168,7 +164,6 @@ static void set_gpu_pdev(struct drm_device *dev,
168static int adreno_bind(struct device *dev, struct device *master, void *data) 164static int adreno_bind(struct device *dev, struct device *master, void *data)
169{ 165{
170 static struct adreno_platform_config config = {}; 166 static struct adreno_platform_config config = {};
171#ifdef CONFIG_OF
172 struct device_node *child, *node = dev->of_node; 167 struct device_node *child, *node = dev->of_node;
173 u32 val; 168 u32 val;
174 int ret; 169 int ret;
@@ -205,53 +200,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
205 return -ENXIO; 200 return -ENXIO;
206 } 201 }
207 202
208#else
209 struct kgsl_device_platform_data *pdata = dev->platform_data;
210 uint32_t version = socinfo_get_version();
211 if (cpu_is_apq8064ab()) {
212 config.fast_rate = 450000000;
213 config.slow_rate = 27000000;
214 config.bus_freq = 4;
215 config.rev = ADRENO_REV(3, 2, 1, 0);
216 } else if (cpu_is_apq8064()) {
217 config.fast_rate = 400000000;
218 config.slow_rate = 27000000;
219 config.bus_freq = 4;
220
221 if (SOCINFO_VERSION_MAJOR(version) == 2)
222 config.rev = ADRENO_REV(3, 2, 0, 2);
223 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
224 (SOCINFO_VERSION_MINOR(version) == 1))
225 config.rev = ADRENO_REV(3, 2, 0, 1);
226 else
227 config.rev = ADRENO_REV(3, 2, 0, 0);
228
229 } else if (cpu_is_msm8960ab()) {
230 config.fast_rate = 400000000;
231 config.slow_rate = 320000000;
232 config.bus_freq = 4;
233
234 if (SOCINFO_VERSION_MINOR(version) == 0)
235 config.rev = ADRENO_REV(3, 2, 1, 0);
236 else
237 config.rev = ADRENO_REV(3, 2, 1, 1);
238
239 } else if (cpu_is_msm8930()) {
240 config.fast_rate = 400000000;
241 config.slow_rate = 27000000;
242 config.bus_freq = 3;
243
244 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
245 (SOCINFO_VERSION_MINOR(version) == 2))
246 config.rev = ADRENO_REV(3, 0, 5, 2);
247 else
248 config.rev = ADRENO_REV(3, 0, 5, 0);
249
250 }
251# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
252 config.bus_scale_table = pdata->bus_scale_table;
253# endif
254#endif
255 dev->platform_data = &config; 203 dev->platform_data = &config;
256 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); 204 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
257 return 0; 205 return 0;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 5f5a3732cdf6..749fbb28ec3d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -31,10 +31,12 @@ enum msm_dsi_phy_type {
31 MSM_DSI_PHY_28NM_HPM, 31 MSM_DSI_PHY_28NM_HPM,
32 MSM_DSI_PHY_28NM_LP, 32 MSM_DSI_PHY_28NM_LP,
33 MSM_DSI_PHY_20NM, 33 MSM_DSI_PHY_20NM,
34 MSM_DSI_PHY_28NM_8960,
34 MSM_DSI_PHY_MAX 35 MSM_DSI_PHY_MAX
35}; 36};
36 37
37#define DSI_DEV_REGULATOR_MAX 8 38#define DSI_DEV_REGULATOR_MAX 8
39#define DSI_BUS_CLK_MAX 4
38 40
39/* Regulators for DSI devices */ 41/* Regulators for DSI devices */
40struct dsi_reg_entry { 42struct dsi_reg_entry {
@@ -89,7 +91,7 @@ int msm_dsi_manager_phy_enable(int id,
89 u32 *clk_pre, u32 *clk_post); 91 u32 *clk_pre, u32 *clk_post);
90void msm_dsi_manager_phy_disable(int id); 92void msm_dsi_manager_phy_disable(int id);
91int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); 93int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
92bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len); 94bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
93int msm_dsi_manager_register(struct msm_dsi *msm_dsi); 95int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
94void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); 96void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
95 97
@@ -143,7 +145,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
143int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, 145int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
144 const struct mipi_dsi_msg *msg); 146 const struct mipi_dsi_msg *msg);
145void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, 147void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
146 u32 iova, u32 len); 148 u32 dma_base, u32 len);
147int msm_dsi_host_enable(struct mipi_dsi_host *host); 149int msm_dsi_host_enable(struct mipi_dsi_host *host);
148int msm_dsi_host_disable(struct mipi_dsi_host *host); 150int msm_dsi_host_disable(struct mipi_dsi_host *host);
149int msm_dsi_host_power_on(struct mipi_dsi_host *host); 151int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 5872d5e5934f..2a827d8093a2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -13,9 +13,26 @@
13 13
14#include "dsi_cfg.h" 14#include "dsi_cfg.h"
15 15
16/* DSI v2 has not been supported by now */ 16static const char * const dsi_v2_bus_clk_names[] = {
17static const struct msm_dsi_config dsi_v2_cfg = { 17 "core_mmss_clk", "iface_clk", "bus_clk",
18};
19
20static const struct msm_dsi_config apq8064_dsi_cfg = {
18 .io_offset = 0, 21 .io_offset = 0,
22 .reg_cfg = {
23 .num = 3,
24 .regs = {
25 {"vdda", 1200000, 1200000, 100000, 100},
26 {"avdd", 3000000, 3000000, 110000, 100},
27 {"vddio", 1800000, 1800000, 100000, 100},
28 },
29 },
30 .bus_clk_names = dsi_v2_bus_clk_names,
31 .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
32};
33
34static const char * const dsi_6g_bus_clk_names[] = {
35 "mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk",
19}; 36};
20 37
21static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { 38static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
@@ -29,6 +46,12 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
29 {"vddio", 1800000, 1800000, 100000, 100}, 46 {"vddio", 1800000, 1800000, 100000, 100},
30 }, 47 },
31 }, 48 },
49 .bus_clk_names = dsi_6g_bus_clk_names,
50 .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
51};
52
53static const char * const dsi_8916_bus_clk_names[] = {
54 "mdp_core_clk", "iface_clk", "bus_clk",
32}; 55};
33 56
34static const struct msm_dsi_config msm8916_dsi_cfg = { 57static const struct msm_dsi_config msm8916_dsi_cfg = {
@@ -42,6 +65,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
42 {"vddio", 1800000, 1800000, 100000, 100}, 65 {"vddio", 1800000, 1800000, 100000, 100},
43 }, 66 },
44 }, 67 },
68 .bus_clk_names = dsi_8916_bus_clk_names,
69 .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
45}; 70};
46 71
47static const struct msm_dsi_config msm8994_dsi_cfg = { 72static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -57,11 +82,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
57 {"lab_reg", -1, -1, -1, -1}, 82 {"lab_reg", -1, -1, -1, -1},
58 {"ibb_reg", -1, -1, -1, -1}, 83 {"ibb_reg", -1, -1, -1, -1},
59 }, 84 },
60 } 85 },
86 .bus_clk_names = dsi_6g_bus_clk_names,
87 .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
61}; 88};
62 89
63static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { 90static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
64 {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg}, 91 {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
65 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, 92 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
66 &msm8974_apq8084_dsi_cfg}, 93 &msm8974_apq8084_dsi_cfg},
67 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1, 94 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 4cf887240177..a68c836744a3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -25,11 +25,15 @@
25#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 25#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
26#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 26#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
27 27
28#define MSM_DSI_V2_VER_MINOR_8064 0x0
29
28#define DSI_6G_REG_SHIFT 4 30#define DSI_6G_REG_SHIFT 4
29 31
30struct msm_dsi_config { 32struct msm_dsi_config {
31 u32 io_offset; 33 u32 io_offset;
32 struct dsi_reg_config reg_cfg; 34 struct dsi_reg_config reg_cfg;
35 const char * const *bus_clk_names;
36 const int num_bus_clks;
33}; 37};
34 38
35struct msm_dsi_cfg_handler { 39struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c49868efcda..48f9967b4a1b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -24,26 +24,36 @@
24#include <linux/of_graph.h> 24#include <linux/of_graph.h>
25#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/mfd/syscon.h>
28#include <linux/regmap.h>
27#include <video/mipi_display.h> 29#include <video/mipi_display.h>
28 30
29#include "dsi.h" 31#include "dsi.h"
30#include "dsi.xml.h" 32#include "dsi.xml.h"
33#include "sfpb.xml.h"
31#include "dsi_cfg.h" 34#include "dsi_cfg.h"
32 35
33static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) 36static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
34{ 37{
35 u32 ver; 38 u32 ver;
36 u32 ver_6g;
37 39
38 if (!major || !minor) 40 if (!major || !minor)
39 return -EINVAL; 41 return -EINVAL;
40 42
41 /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0 43 /*
44 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
42 * makes all other registers 4-byte shifted down. 45 * makes all other registers 4-byte shifted down.
46 *
47 * In order to identify between DSI6G(v3) and beyond, and DSIv2 and
48 * older, we read the DSI_VERSION register without any shift(offset
49 * 0x1f0). In the case of DSIv2, this hast to be a non-zero value. In
50 * the case of DSI6G, this has to be zero (the offset points to a
51 * scratch register which we never touch)
43 */ 52 */
44 ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION); 53
45 if (ver_6g == 0) { 54 ver = msm_readl(base + REG_DSI_VERSION);
46 ver = msm_readl(base + REG_DSI_VERSION); 55 if (ver) {
56 /* older dsi host, there is no register shift */
47 ver = FIELD(ver, DSI_VERSION_MAJOR); 57 ver = FIELD(ver, DSI_VERSION_MAJOR);
48 if (ver <= MSM_DSI_VER_MAJOR_V2) { 58 if (ver <= MSM_DSI_VER_MAJOR_V2) {
49 /* old versions */ 59 /* old versions */
@@ -54,12 +64,17 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
54 return -EINVAL; 64 return -EINVAL;
55 } 65 }
56 } else { 66 } else {
67 /*
68 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
69 * registers are shifted down, read DSI_VERSION again with
70 * the shifted offset
71 */
57 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); 72 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
58 ver = FIELD(ver, DSI_VERSION_MAJOR); 73 ver = FIELD(ver, DSI_VERSION_MAJOR);
59 if (ver == MSM_DSI_VER_MAJOR_6G) { 74 if (ver == MSM_DSI_VER_MAJOR_6G) {
60 /* 6G version */ 75 /* 6G version */
61 *major = ver; 76 *major = ver;
62 *minor = ver_6g; 77 *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
63 return 0; 78 return 0;
64 } else { 79 } else {
65 return -EINVAL; 80 return -EINVAL;
@@ -91,10 +106,9 @@ struct msm_dsi_host {
91 106
92 void __iomem *ctrl_base; 107 void __iomem *ctrl_base;
93 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; 108 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
94 struct clk *mdp_core_clk; 109
95 struct clk *ahb_clk; 110 struct clk *bus_clks[DSI_BUS_CLK_MAX];
96 struct clk *axi_clk; 111
97 struct clk *mmss_misc_ahb_clk;
98 struct clk *byte_clk; 112 struct clk *byte_clk;
99 struct clk *esc_clk; 113 struct clk *esc_clk;
100 struct clk *pixel_clk; 114 struct clk *pixel_clk;
@@ -102,6 +116,14 @@ struct msm_dsi_host {
102 struct clk *pixel_clk_src; 116 struct clk *pixel_clk_src;
103 117
104 u32 byte_clk_rate; 118 u32 byte_clk_rate;
119 u32 esc_clk_rate;
120
121 /* DSI v2 specific clocks */
122 struct clk *src_clk;
123 struct clk *esc_clk_src;
124 struct clk *dsi_clk_src;
125
126 u32 src_clk_rate;
105 127
106 struct gpio_desc *disp_en_gpio; 128 struct gpio_desc *disp_en_gpio;
107 struct gpio_desc *te_gpio; 129 struct gpio_desc *te_gpio;
@@ -119,9 +141,19 @@ struct msm_dsi_host {
119 struct work_struct err_work; 141 struct work_struct err_work;
120 struct workqueue_struct *workqueue; 142 struct workqueue_struct *workqueue;
121 143
144 /* DSI 6G TX buffer*/
122 struct drm_gem_object *tx_gem_obj; 145 struct drm_gem_object *tx_gem_obj;
146
147 /* DSI v2 TX buffer */
148 void *tx_buf;
149 dma_addr_t tx_buf_paddr;
150
151 int tx_size;
152
123 u8 *rx_buf; 153 u8 *rx_buf;
124 154
155 struct regmap *sfpb;
156
125 struct drm_display_mode *mode; 157 struct drm_display_mode *mode;
126 158
127 /* connected device info */ 159 /* connected device info */
@@ -165,21 +197,31 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
165 struct msm_dsi_host *msm_host) 197 struct msm_dsi_host *msm_host)
166{ 198{
167 const struct msm_dsi_cfg_handler *cfg_hnd = NULL; 199 const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
200 struct device *dev = &msm_host->pdev->dev;
168 struct regulator *gdsc_reg; 201 struct regulator *gdsc_reg;
202 struct clk *ahb_clk;
169 int ret; 203 int ret;
170 u32 major = 0, minor = 0; 204 u32 major = 0, minor = 0;
171 205
172 gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc"); 206 gdsc_reg = regulator_get(dev, "gdsc");
173 if (IS_ERR(gdsc_reg)) { 207 if (IS_ERR(gdsc_reg)) {
174 pr_err("%s: cannot get gdsc\n", __func__); 208 pr_err("%s: cannot get gdsc\n", __func__);
175 goto exit; 209 goto exit;
176 } 210 }
211
212 ahb_clk = clk_get(dev, "iface_clk");
213 if (IS_ERR(ahb_clk)) {
214 pr_err("%s: cannot get interface clock\n", __func__);
215 goto put_gdsc;
216 }
217
177 ret = regulator_enable(gdsc_reg); 218 ret = regulator_enable(gdsc_reg);
178 if (ret) { 219 if (ret) {
179 pr_err("%s: unable to enable gdsc\n", __func__); 220 pr_err("%s: unable to enable gdsc\n", __func__);
180 goto put_gdsc; 221 goto put_clk;
181 } 222 }
182 ret = clk_prepare_enable(msm_host->ahb_clk); 223
224 ret = clk_prepare_enable(ahb_clk);
183 if (ret) { 225 if (ret) {
184 pr_err("%s: unable to enable ahb_clk\n", __func__); 226 pr_err("%s: unable to enable ahb_clk\n", __func__);
185 goto disable_gdsc; 227 goto disable_gdsc;
@@ -196,9 +238,11 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
196 DBG("%s: Version %x:%x\n", __func__, major, minor); 238 DBG("%s: Version %x:%x\n", __func__, major, minor);
197 239
198disable_clks: 240disable_clks:
199 clk_disable_unprepare(msm_host->ahb_clk); 241 clk_disable_unprepare(ahb_clk);
200disable_gdsc: 242disable_gdsc:
201 regulator_disable(gdsc_reg); 243 regulator_disable(gdsc_reg);
244put_clk:
245 clk_put(ahb_clk);
202put_gdsc: 246put_gdsc:
203 regulator_put(gdsc_reg); 247 regulator_put(gdsc_reg);
204exit: 248exit:
@@ -295,40 +339,23 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
295static int dsi_clk_init(struct msm_dsi_host *msm_host) 339static int dsi_clk_init(struct msm_dsi_host *msm_host)
296{ 340{
297 struct device *dev = &msm_host->pdev->dev; 341 struct device *dev = &msm_host->pdev->dev;
298 int ret = 0; 342 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
299 343 const struct msm_dsi_config *cfg = cfg_hnd->cfg;
300 msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); 344 int i, ret = 0;
301 if (IS_ERR(msm_host->mdp_core_clk)) { 345
302 ret = PTR_ERR(msm_host->mdp_core_clk); 346 /* get bus clocks */
303 pr_err("%s: Unable to get mdp core clk. ret=%d\n", 347 for (i = 0; i < cfg->num_bus_clks; i++) {
304 __func__, ret); 348 msm_host->bus_clks[i] = devm_clk_get(dev,
305 goto exit; 349 cfg->bus_clk_names[i]);
306 } 350 if (IS_ERR(msm_host->bus_clks[i])) {
307 351 ret = PTR_ERR(msm_host->bus_clks[i]);
308 msm_host->ahb_clk = devm_clk_get(dev, "iface_clk"); 352 pr_err("%s: Unable to get %s, ret = %d\n",
309 if (IS_ERR(msm_host->ahb_clk)) { 353 __func__, cfg->bus_clk_names[i], ret);
310 ret = PTR_ERR(msm_host->ahb_clk); 354 goto exit;
311 pr_err("%s: Unable to get mdss ahb clk. ret=%d\n", 355 }
312 __func__, ret);
313 goto exit;
314 }
315
316 msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
317 if (IS_ERR(msm_host->axi_clk)) {
318 ret = PTR_ERR(msm_host->axi_clk);
319 pr_err("%s: Unable to get axi bus clk. ret=%d\n",
320 __func__, ret);
321 goto exit;
322 }
323
324 msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
325 if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
326 ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
327 pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
328 __func__, ret);
329 goto exit;
330 } 356 }
331 357
358 /* get link and source clocks */
332 msm_host->byte_clk = devm_clk_get(dev, "byte_clk"); 359 msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
333 if (IS_ERR(msm_host->byte_clk)) { 360 if (IS_ERR(msm_host->byte_clk)) {
334 ret = PTR_ERR(msm_host->byte_clk); 361 ret = PTR_ERR(msm_host->byte_clk);
@@ -356,80 +383,85 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
356 goto exit; 383 goto exit;
357 } 384 }
358 385
359 msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src"); 386 msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
360 if (IS_ERR(msm_host->byte_clk_src)) { 387 if (!msm_host->byte_clk_src) {
361 ret = PTR_ERR(msm_host->byte_clk_src); 388 ret = -ENODEV;
362 pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret); 389 pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
363 msm_host->byte_clk_src = NULL;
364 goto exit; 390 goto exit;
365 } 391 }
366 392
367 msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src"); 393 msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
368 if (IS_ERR(msm_host->pixel_clk_src)) { 394 if (!msm_host->pixel_clk_src) {
369 ret = PTR_ERR(msm_host->pixel_clk_src); 395 ret = -ENODEV;
370 pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret); 396 pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
371 msm_host->pixel_clk_src = NULL;
372 goto exit; 397 goto exit;
373 } 398 }
374 399
400 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
401 msm_host->src_clk = devm_clk_get(dev, "src_clk");
402 if (IS_ERR(msm_host->src_clk)) {
403 ret = PTR_ERR(msm_host->src_clk);
404 pr_err("%s: can't find dsi_src_clk. ret=%d\n",
405 __func__, ret);
406 msm_host->src_clk = NULL;
407 goto exit;
408 }
409
410 msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
411 if (!msm_host->esc_clk_src) {
412 ret = -ENODEV;
413 pr_err("%s: can't get esc_clk_src. ret=%d\n",
414 __func__, ret);
415 goto exit;
416 }
417
418 msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
419 if (!msm_host->dsi_clk_src) {
420 ret = -ENODEV;
421 pr_err("%s: can't get dsi_clk_src. ret=%d\n",
422 __func__, ret);
423 }
424 }
375exit: 425exit:
376 return ret; 426 return ret;
377} 427}
378 428
379static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host) 429static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
380{ 430{
381 int ret; 431 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
432 int i, ret;
382 433
383 DBG("id=%d", msm_host->id); 434 DBG("id=%d", msm_host->id);
384 435
385 ret = clk_prepare_enable(msm_host->mdp_core_clk); 436 for (i = 0; i < cfg->num_bus_clks; i++) {
386 if (ret) { 437 ret = clk_prepare_enable(msm_host->bus_clks[i]);
387 pr_err("%s: failed to enable mdp_core_clock, %d\n", 438 if (ret) {
388 __func__, ret); 439 pr_err("%s: failed to enable bus clock %d ret %d\n",
389 goto core_clk_err; 440 __func__, i, ret);
390 } 441 goto err;
391 442 }
392 ret = clk_prepare_enable(msm_host->ahb_clk);
393 if (ret) {
394 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
395 goto ahb_clk_err;
396 }
397
398 ret = clk_prepare_enable(msm_host->axi_clk);
399 if (ret) {
400 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
401 goto axi_clk_err;
402 }
403
404 ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
405 if (ret) {
406 pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
407 __func__, ret);
408 goto misc_ahb_clk_err;
409 } 443 }
410 444
411 return 0; 445 return 0;
446err:
447 for (; i > 0; i--)
448 clk_disable_unprepare(msm_host->bus_clks[i]);
412 449
413misc_ahb_clk_err:
414 clk_disable_unprepare(msm_host->axi_clk);
415axi_clk_err:
416 clk_disable_unprepare(msm_host->ahb_clk);
417ahb_clk_err:
418 clk_disable_unprepare(msm_host->mdp_core_clk);
419core_clk_err:
420 return ret; 450 return ret;
421} 451}
422 452
423static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host) 453static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
424{ 454{
455 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
456 int i;
457
425 DBG(""); 458 DBG("");
426 clk_disable_unprepare(msm_host->mmss_misc_ahb_clk); 459
427 clk_disable_unprepare(msm_host->axi_clk); 460 for (i = cfg->num_bus_clks - 1; i >= 0; i--)
428 clk_disable_unprepare(msm_host->ahb_clk); 461 clk_disable_unprepare(msm_host->bus_clks[i]);
429 clk_disable_unprepare(msm_host->mdp_core_clk);
430} 462}
431 463
432static int dsi_link_clk_enable(struct msm_dsi_host *msm_host) 464static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
433{ 465{
434 int ret; 466 int ret;
435 467
@@ -476,11 +508,98 @@ error:
476 return ret; 508 return ret;
477} 509}
478 510
479static void dsi_link_clk_disable(struct msm_dsi_host *msm_host) 511static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
480{ 512{
513 int ret;
514
515 DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
516 msm_host->mode->clock, msm_host->byte_clk_rate,
517 msm_host->esc_clk_rate, msm_host->src_clk_rate);
518
519 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
520 if (ret) {
521 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
522 goto error;
523 }
524
525 ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
526 if (ret) {
527 pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
528 goto error;
529 }
530
531 ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
532 if (ret) {
533 pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
534 goto error;
535 }
536
537 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
538 if (ret) {
539 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
540 goto error;
541 }
542
543 ret = clk_prepare_enable(msm_host->byte_clk);
544 if (ret) {
545 pr_err("%s: Failed to enable dsi byte clk\n", __func__);
546 goto error;
547 }
548
549 ret = clk_prepare_enable(msm_host->esc_clk);
550 if (ret) {
551 pr_err("%s: Failed to enable dsi esc clk\n", __func__);
552 goto esc_clk_err;
553 }
554
555 ret = clk_prepare_enable(msm_host->src_clk);
556 if (ret) {
557 pr_err("%s: Failed to enable dsi src clk\n", __func__);
558 goto src_clk_err;
559 }
560
561 ret = clk_prepare_enable(msm_host->pixel_clk);
562 if (ret) {
563 pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
564 goto pixel_clk_err;
565 }
566
567 return 0;
568
569pixel_clk_err:
570 clk_disable_unprepare(msm_host->src_clk);
571src_clk_err:
481 clk_disable_unprepare(msm_host->esc_clk); 572 clk_disable_unprepare(msm_host->esc_clk);
482 clk_disable_unprepare(msm_host->pixel_clk); 573esc_clk_err:
483 clk_disable_unprepare(msm_host->byte_clk); 574 clk_disable_unprepare(msm_host->byte_clk);
575error:
576 return ret;
577}
578
579static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
580{
581 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
582
583 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
584 return dsi_link_clk_enable_6g(msm_host);
585 else
586 return dsi_link_clk_enable_v2(msm_host);
587}
588
589static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
590{
591 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
592
593 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
594 clk_disable_unprepare(msm_host->esc_clk);
595 clk_disable_unprepare(msm_host->pixel_clk);
596 clk_disable_unprepare(msm_host->byte_clk);
597 } else {
598 clk_disable_unprepare(msm_host->pixel_clk);
599 clk_disable_unprepare(msm_host->src_clk);
600 clk_disable_unprepare(msm_host->esc_clk);
601 clk_disable_unprepare(msm_host->byte_clk);
602 }
484} 603}
485 604
486static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable) 605static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
@@ -515,6 +634,7 @@ unlock_ret:
515static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) 634static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
516{ 635{
517 struct drm_display_mode *mode = msm_host->mode; 636 struct drm_display_mode *mode = msm_host->mode;
637 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
518 u8 lanes = msm_host->lanes; 638 u8 lanes = msm_host->lanes;
519 u32 bpp = dsi_get_bpp(msm_host->format); 639 u32 bpp = dsi_get_bpp(msm_host->format);
520 u32 pclk_rate; 640 u32 pclk_rate;
@@ -534,6 +654,47 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
534 654
535 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate); 655 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
536 656
657 msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
658
659 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
660 unsigned int esc_mhz, esc_div;
661 unsigned long byte_mhz;
662
663 msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
664
665 /*
666 * esc clock is byte clock followed by a 4 bit divider,
667 * we need to find an escape clock frequency within the
668 * mipi DSI spec range within the maximum divider limit
669 * We iterate here between an escape clock frequencey
670 * between 20 Mhz to 5 Mhz and pick up the first one
671 * that can be supported by our divider
672 */
673
674 byte_mhz = msm_host->byte_clk_rate / 1000000;
675
676 for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
677 esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
678
679 /*
680 * TODO: Ideally, we shouldn't know what sort of divider
681 * is available in mmss_cc, we're just assuming that
682 * it'll always be a 4 bit divider. Need to come up with
683 * a better way here.
684 */
685 if (esc_div >= 1 && esc_div <= 16)
686 break;
687 }
688
689 if (esc_mhz < 5)
690 return -EINVAL;
691
692 msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
693
694 DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
695 msm_host->src_clk_rate);
696 }
697
537 return 0; 698 return 0;
538} 699}
539 700
@@ -835,29 +996,46 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
835static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) 996static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
836{ 997{
837 struct drm_device *dev = msm_host->dev; 998 struct drm_device *dev = msm_host->dev;
999 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
838 int ret; 1000 int ret;
839 u32 iova; 1001 u32 iova;
840 1002
841 mutex_lock(&dev->struct_mutex); 1003 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
842 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED); 1004 mutex_lock(&dev->struct_mutex);
843 if (IS_ERR(msm_host->tx_gem_obj)) { 1005 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
844 ret = PTR_ERR(msm_host->tx_gem_obj); 1006 if (IS_ERR(msm_host->tx_gem_obj)) {
845 pr_err("%s: failed to allocate gem, %d\n", __func__, ret); 1007 ret = PTR_ERR(msm_host->tx_gem_obj);
846 msm_host->tx_gem_obj = NULL; 1008 pr_err("%s: failed to allocate gem, %d\n",
1009 __func__, ret);
1010 msm_host->tx_gem_obj = NULL;
1011 mutex_unlock(&dev->struct_mutex);
1012 return ret;
1013 }
1014
1015 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
847 mutex_unlock(&dev->struct_mutex); 1016 mutex_unlock(&dev->struct_mutex);
848 return ret; 1017 if (ret) {
849 } 1018 pr_err("%s: failed to get iova, %d\n", __func__, ret);
1019 return ret;
1020 }
850 1021
851 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova); 1022 if (iova & 0x07) {
852 if (ret) { 1023 pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
853 pr_err("%s: failed to get iova, %d\n", __func__, ret); 1024 return -EINVAL;
854 return ret; 1025 }
855 }
856 mutex_unlock(&dev->struct_mutex);
857 1026
858 if (iova & 0x07) { 1027 msm_host->tx_size = msm_host->tx_gem_obj->size;
859 pr_err("%s: buf NOT 8 bytes aligned\n", __func__); 1028 } else {
860 return -EINVAL; 1029 msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1030 &msm_host->tx_buf_paddr, GFP_KERNEL);
1031 if (!msm_host->tx_buf) {
1032 ret = -ENOMEM;
1033 pr_err("%s: failed to allocate tx buf, %d\n",
1034 __func__, ret);
1035 return ret;
1036 }
1037
1038 msm_host->tx_size = size;
861 } 1039 }
862 1040
863 return 0; 1041 return 0;
@@ -874,14 +1052,19 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
874 msm_host->tx_gem_obj = NULL; 1052 msm_host->tx_gem_obj = NULL;
875 mutex_unlock(&dev->struct_mutex); 1053 mutex_unlock(&dev->struct_mutex);
876 } 1054 }
1055
1056 if (msm_host->tx_buf)
1057 dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
1058 msm_host->tx_buf_paddr);
877} 1059}
878 1060
879/* 1061/*
880 * prepare cmd buffer to be txed 1062 * prepare cmd buffer to be txed
881 */ 1063 */
882static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem, 1064static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
883 const struct mipi_dsi_msg *msg) 1065 const struct mipi_dsi_msg *msg)
884{ 1066{
1067 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
885 struct mipi_dsi_packet packet; 1068 struct mipi_dsi_packet packet;
886 int len; 1069 int len;
887 int ret; 1070 int ret;
@@ -894,17 +1077,20 @@ static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
894 } 1077 }
895 len = (packet.size + 3) & (~0x3); 1078 len = (packet.size + 3) & (~0x3);
896 1079
897 if (len > tx_gem->size) { 1080 if (len > msm_host->tx_size) {
898 pr_err("%s: packet size is too big\n", __func__); 1081 pr_err("%s: packet size is too big\n", __func__);
899 return -EINVAL; 1082 return -EINVAL;
900 } 1083 }
901 1084
902 data = msm_gem_vaddr(tx_gem); 1085 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
903 1086 data = msm_gem_vaddr(msm_host->tx_gem_obj);
904 if (IS_ERR(data)) { 1087 if (IS_ERR(data)) {
905 ret = PTR_ERR(data); 1088 ret = PTR_ERR(data);
906 pr_err("%s: get vaddr failed, %d\n", __func__, ret); 1089 pr_err("%s: get vaddr failed, %d\n", __func__, ret);
907 return ret; 1090 return ret;
1091 }
1092 } else {
1093 data = msm_host->tx_buf;
908 } 1094 }
909 1095
910 /* MSM specific command format in memory */ 1096 /* MSM specific command format in memory */
@@ -970,17 +1156,21 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
970 return msg->rx_len; 1156 return msg->rx_len;
971} 1157}
972 1158
973
974static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) 1159static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
975{ 1160{
1161 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
976 int ret; 1162 int ret;
977 u32 iova; 1163 u32 dma_base;
978 bool triggered; 1164 bool triggered;
979 1165
980 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova); 1166 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
981 if (ret) { 1167 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
982 pr_err("%s: failed to get iova: %d\n", __func__, ret); 1168 if (ret) {
983 return ret; 1169 pr_err("%s: failed to get iova: %d\n", __func__, ret);
1170 return ret;
1171 }
1172 } else {
1173 dma_base = msm_host->tx_buf_paddr;
984 } 1174 }
985 1175
986 reinit_completion(&msm_host->dma_comp); 1176 reinit_completion(&msm_host->dma_comp);
@@ -988,7 +1178,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
988 dsi_wait4video_eng_busy(msm_host); 1178 dsi_wait4video_eng_busy(msm_host);
989 1179
990 triggered = msm_dsi_manager_cmd_xfer_trigger( 1180 triggered = msm_dsi_manager_cmd_xfer_trigger(
991 msm_host->id, iova, len); 1181 msm_host->id, dma_base, len);
992 if (triggered) { 1182 if (triggered) {
993 ret = wait_for_completion_timeout(&msm_host->dma_comp, 1183 ret = wait_for_completion_timeout(&msm_host->dma_comp,
994 msecs_to_jiffies(200)); 1184 msecs_to_jiffies(200));
@@ -1060,7 +1250,7 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1060 int bllp_len = msm_host->mode->hdisplay * 1250 int bllp_len = msm_host->mode->hdisplay *
1061 dsi_get_bpp(msm_host->format) / 8; 1251 dsi_get_bpp(msm_host->format) / 8;
1062 1252
1063 len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg); 1253 len = dsi_cmd_dma_add(msm_host, msg);
1064 if (!len) { 1254 if (!len) {
1065 pr_err("%s: failed to add cmd type = 0x%x\n", 1255 pr_err("%s: failed to add cmd type = 0x%x\n",
1066 __func__, msg->type); 1256 __func__, msg->type);
@@ -1383,6 +1573,16 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1383 1573
1384 msm_host->device_node = device_node; 1574 msm_host->device_node = device_node;
1385 1575
1576 if (of_property_read_bool(np, "syscon-sfpb")) {
1577 msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
1578 "syscon-sfpb");
1579 if (IS_ERR(msm_host->sfpb)) {
1580 dev_err(dev, "%s: failed to get sfpb regmap\n",
1581 __func__);
1582 return PTR_ERR(msm_host->sfpb);
1583 }
1584 }
1585
1386 return 0; 1586 return 0;
1387} 1587}
1388 1588
@@ -1408,12 +1608,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1408 goto fail; 1608 goto fail;
1409 } 1609 }
1410 1610
1411 ret = dsi_clk_init(msm_host);
1412 if (ret) {
1413 pr_err("%s: unable to initialize dsi clks\n", __func__);
1414 goto fail;
1415 }
1416
1417 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL"); 1611 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1418 if (IS_ERR(msm_host->ctrl_base)) { 1612 if (IS_ERR(msm_host->ctrl_base)) {
1419 pr_err("%s: unable to map Dsi ctrl base\n", __func__); 1613 pr_err("%s: unable to map Dsi ctrl base\n", __func__);
@@ -1437,6 +1631,12 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1437 goto fail; 1631 goto fail;
1438 } 1632 }
1439 1633
1634 ret = dsi_clk_init(msm_host);
1635 if (ret) {
1636 pr_err("%s: unable to initialize dsi clks\n", __func__);
1637 goto fail;
1638 }
1639
1440 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); 1640 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1441 if (!msm_host->rx_buf) { 1641 if (!msm_host->rx_buf) {
1442 pr_err("%s: alloc rx temp buf failed\n", __func__); 1642 pr_err("%s: alloc rx temp buf failed\n", __func__);
@@ -1750,11 +1950,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1750 return ret; 1950 return ret;
1751} 1951}
1752 1952
1753void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len) 1953void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
1954 u32 len)
1754{ 1955{
1755 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1956 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1756 1957
1757 dsi_write(msm_host, REG_DSI_DMA_BASE, iova); 1958 dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
1758 dsi_write(msm_host, REG_DSI_DMA_LEN, len); 1959 dsi_write(msm_host, REG_DSI_DMA_LEN, len);
1759 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1); 1960 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
1760 1961
@@ -1766,6 +1967,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
1766 struct msm_dsi_pll *src_pll) 1967 struct msm_dsi_pll *src_pll)
1767{ 1968{
1768 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1969 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1970 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1769 struct clk *byte_clk_provider, *pixel_clk_provider; 1971 struct clk *byte_clk_provider, *pixel_clk_provider;
1770 int ret; 1972 int ret;
1771 1973
@@ -1791,6 +1993,22 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
1791 goto exit; 1993 goto exit;
1792 } 1994 }
1793 1995
1996 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
1997 ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
1998 if (ret) {
1999 pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
2000 __func__, ret);
2001 goto exit;
2002 }
2003
2004 ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
2005 if (ret) {
2006 pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
2007 __func__, ret);
2008 goto exit;
2009 }
2010 }
2011
1794exit: 2012exit:
1795 return ret; 2013 return ret;
1796} 2014}
@@ -1828,6 +2046,20 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
1828 return 0; 2046 return 0;
1829} 2047}
1830 2048
2049static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2050{
2051 enum sfpb_ahb_arb_master_port_en en;
2052
2053 if (!msm_host->sfpb)
2054 return;
2055
2056 en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2057
2058 regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2059 SFPB_GPREG_MASTER_PORT_EN__MASK,
2060 SFPB_GPREG_MASTER_PORT_EN(en));
2061}
2062
1831int msm_dsi_host_power_on(struct mipi_dsi_host *host) 2063int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1832{ 2064{
1833 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2065 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -1840,6 +2072,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1840 goto unlock_ret; 2072 goto unlock_ret;
1841 } 2073 }
1842 2074
2075 msm_dsi_sfpb_config(msm_host, true);
2076
1843 ret = dsi_calc_clk_rate(msm_host); 2077 ret = dsi_calc_clk_rate(msm_host);
1844 if (ret) { 2078 if (ret) {
1845 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 2079 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
@@ -1862,7 +2096,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1862 dsi_phy_sw_reset(msm_host); 2096 dsi_phy_sw_reset(msm_host);
1863 ret = msm_dsi_manager_phy_enable(msm_host->id, 2097 ret = msm_dsi_manager_phy_enable(msm_host->id,
1864 msm_host->byte_clk_rate * 8, 2098 msm_host->byte_clk_rate * 8,
1865 clk_get_rate(msm_host->esc_clk), 2099 msm_host->esc_clk_rate,
1866 &clk_pre, &clk_post); 2100 &clk_pre, &clk_post);
1867 dsi_bus_clk_disable(msm_host); 2101 dsi_bus_clk_disable(msm_host);
1868 if (ret) { 2102 if (ret) {
@@ -1927,6 +2161,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
1927 2161
1928 dsi_host_regulator_disable(msm_host); 2162 dsi_host_regulator_disable(msm_host);
1929 2163
2164 msm_dsi_sfpb_config(msm_host, false);
2165
1930 DBG("-"); 2166 DBG("-");
1931 2167
1932 msm_host->power_on = false; 2168 msm_host->power_on = false;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0455ff75074a..58ba7ec17f51 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -774,7 +774,7 @@ restore_host0:
774 return ret; 774 return ret;
775} 775}
776 776
777bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len) 777bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
778{ 778{
779 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 779 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
780 struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0); 780 struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
@@ -784,9 +784,9 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
784 return false; 784 return false;
785 785
786 if (IS_SYNC_NEEDED() && msm_dsi0) 786 if (IS_SYNC_NEEDED() && msm_dsi0)
787 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len); 787 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len);
788 788
789 msm_dsi_host_cmd_xfer_commit(host, iova, len); 789 msm_dsi_host_cmd_xfer_commit(host, dma_base, len);
790 790
791 return true; 791 return true;
792} 792}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f1f955f571fa..91a95fb04a4a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -277,6 +277,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
277 { .compatible = "qcom,dsi-phy-20nm", 277 { .compatible = "qcom,dsi-phy-20nm",
278 .data = &dsi_phy_20nm_cfgs }, 278 .data = &dsi_phy_20nm_cfgs },
279#endif 279#endif
280#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
281 { .compatible = "qcom,dsi-phy-28nm-8960",
282 .data = &dsi_phy_28nm_8960_cfgs },
283#endif
280 {} 284 {}
281}; 285};
282 286
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 0456b253239f..0d54ed00386d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -43,6 +43,7 @@ struct msm_dsi_phy_cfg {
43extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; 43extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
44extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; 44extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
45extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; 45extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
46extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
46 47
47struct msm_dsi_dphy_timing { 48struct msm_dsi_dphy_timing {
48 u32 clk_pre; 49 u32 clk_pre;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
new file mode 100644
index 000000000000..197b039ca1f1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -0,0 +1,195 @@
1/*
2 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi_phy.h"
15#include "dsi.xml.h"
16
17static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
18 struct msm_dsi_dphy_timing *timing)
19{
20 void __iomem *base = phy->base;
21
22 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
23 DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
24 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
25 DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
26 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
27 DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
28 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
29 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
30 DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
31 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
32 DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
33 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
34 DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
35 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
36 DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
37 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
38 DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
39 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
40 DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
41 DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
42 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
43 DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
44 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
45 DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
46}
47
48static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
49{
50 void __iomem *base = phy->reg_base;
51
52 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
53 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
54 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
55 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
56 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
57 0x100);
58}
59
60static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
61{
62 void __iomem *base = phy->reg_base;
63
64 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
65 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
66 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
67 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
68 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
69}
70
71static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
72{
73 void __iomem *base = phy->reg_base;
74 u32 status;
75 int i = 5000;
76
77 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
78 0x3);
79
80 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
81 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
82 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
83 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
84 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
85
86 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
87 usleep_range(5000, 6000);
88 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
89
90 do {
91 status = dsi_phy_read(base +
92 REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
93
94 if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
95 break;
96
97 udelay(1);
98 } while (--i > 0);
99}
100
101static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
102{
103 void __iomem *base = phy->base;
104 int i;
105
106 for (i = 0; i < 4; i++) {
107 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
108 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
109 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
110 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
111 0x00);
112 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
113 0x01);
114 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
115 0x66);
116 }
117
118 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
119 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
120 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
121 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
122 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
123 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
124}
125
126static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
127 const unsigned long bit_rate, const unsigned long esc_rate)
128{
129 struct msm_dsi_dphy_timing *timing = &phy->timing;
130 void __iomem *base = phy->base;
131
132 DBG("");
133
134 if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
135 dev_err(&phy->pdev->dev,
136 "%s: D-PHY timing calculation failed\n", __func__);
137 return -EINVAL;
138 }
139
140 dsi_28nm_phy_regulator_init(phy);
141
142 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
143
144 /* strength control */
145 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
146 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
147 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
148
149 /* phy ctrl */
150 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
151 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
152 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
153 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
154
155 dsi_28nm_phy_regulator_ctrl(phy);
156
157 dsi_28nm_phy_calibration(phy);
158
159 dsi_28nm_phy_lane_config(phy);
160
161 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
162 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
163 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
164 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
165
166 dsi_28nm_dphy_set_timing(phy, timing);
167
168 return 0;
169}
170
171static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
172{
173 dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
174
175 /*
176 * Wait for the registers writes to complete in order to
177 * ensure that the phy is completely disabled
178 */
179 wmb();
180}
181
182const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
183 .type = MSM_DSI_PHY_28NM_8960,
184 .src_pll_truthtable = { {true, true}, {false, true} },
185 .reg_cfg = {
186 .num = 1,
187 .regs = {
188 {"vddio", 1800000, 1800000, 100000, 100},
189 },
190 },
191 .ops = {
192 .enable = dsi_28nm_phy_enable,
193 .disable = dsi_28nm_phy_disable,
194 },
195};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 5104fc9f9a53..5cd438f91afe 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -151,6 +151,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
151 case MSM_DSI_PHY_28NM_LP: 151 case MSM_DSI_PHY_28NM_LP:
152 pll = msm_dsi_pll_28nm_init(pdev, type, id); 152 pll = msm_dsi_pll_28nm_init(pdev, type, id);
153 break; 153 break;
154 case MSM_DSI_PHY_28NM_8960:
155 pll = msm_dsi_pll_28nm_8960_init(pdev, id);
156 break;
154 default: 157 default:
155 pll = ERR_PTR(-ENXIO); 158 pll = ERR_PTR(-ENXIO);
156 break; 159 break;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 063caa2c5740..80b6038334a6 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -93,6 +93,16 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
93 return ERR_PTR(-ENODEV); 93 return ERR_PTR(-ENODEV);
94} 94}
95#endif 95#endif
96#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
97struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
98 int id);
99#else
100struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
101 int id)
102{
103 return ERR_PTR(-ENODEV);
104}
105#endif
96 106
97#endif /* __DSI_PLL_H__ */ 107#endif /* __DSI_PLL_H__ */
98 108
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
new file mode 100644
index 000000000000..38c90e1eb002
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -0,0 +1,533 @@
1/*
2 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk-provider.h>
15
16#include "dsi_pll.h"
17#include "dsi.xml.h"
18
19/*
20 * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
21 *
22 *
23 * +------+
24 * dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
25 * F * byte_clk | +------+
26 * | bit clock divider (F / 8)
27 * |
28 * | +------+
29 * o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
30 * | +------+ | (sets parent rate)
31 * | byte clock divider (F) |
32 * | |
33 * | o---> To esc RCG
34 * | (doesn't set parent rate)
35 * |
36 * | +------+
37 * o-----| DIV3 |----dsi0pll------o---> To dsi RCG
38 * +------+ | (sets parent rate)
39 * dsi clock divider (F * magic) |
40 * |
41 * o---> To pixel rcg
42 * (doesn't set parent rate)
43 */
44
45#define POLL_MAX_READS 8000
46#define POLL_TIMEOUT_US 1
47
48#define NUM_PROVIDED_CLKS 2
49
50#define VCO_REF_CLK_RATE 27000000
51#define VCO_MIN_RATE 600000000
52#define VCO_MAX_RATE 1200000000
53
54#define DSI_BYTE_PLL_CLK 0
55#define DSI_PIXEL_PLL_CLK 1
56
57#define VCO_PREF_DIV_RATIO 27
58
59struct pll_28nm_cached_state {
60 unsigned long vco_rate;
61 u8 postdiv3;
62 u8 postdiv2;
63 u8 postdiv1;
64};
65
66struct clk_bytediv {
67 struct clk_hw hw;
68 void __iomem *reg;
69};
70
71struct dsi_pll_28nm {
72 struct msm_dsi_pll base;
73
74 int id;
75 struct platform_device *pdev;
76 void __iomem *mmio;
77
78 /* custom byte clock divider */
79 struct clk_bytediv *bytediv;
80
81 /* private clocks: */
82 struct clk *clks[NUM_DSI_CLOCKS_MAX];
83 u32 num_clks;
84
85 /* clock-provider: */
86 struct clk *provided_clks[NUM_PROVIDED_CLKS];
87 struct clk_onecell_data clk_data;
88
89 struct pll_28nm_cached_state cached_state;
90};
91
92#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base)
93
94static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
95 int nb_tries, int timeout_us)
96{
97 bool pll_locked = false;
98 u32 val;
99
100 while (nb_tries--) {
101 val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
102 pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
103
104 if (pll_locked)
105 break;
106
107 udelay(timeout_us);
108 }
109 DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
110
111 return pll_locked;
112}
113
114/*
115 * Clock Callbacks
116 */
117static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
118 unsigned long parent_rate)
119{
120 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
121 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
122 void __iomem *base = pll_28nm->mmio;
123 u32 val, temp, fb_divider;
124
125 DBG("rate=%lu, parent's=%lu", rate, parent_rate);
126
127 temp = rate / 10;
128 val = VCO_REF_CLK_RATE / 10;
129 fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
130 fb_divider = fb_divider / 2 - 1;
131 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
132 fb_divider & 0xff);
133
134 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
135
136 val |= (fb_divider >> 8) & 0x07;
137
138 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
139 val);
140
141 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
142
143 val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
144
145 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
146 val);
147
148 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
149 0xf);
150
151 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
152 val |= 0x7 << 4;
153 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
154 val);
155
156 return 0;
157}
158
159static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
160{
161 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
162 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
163
164 return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
165 POLL_TIMEOUT_US);
166}
167
168static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
169 unsigned long parent_rate)
170{
171 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
172 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
173 void __iomem *base = pll_28nm->mmio;
174 unsigned long vco_rate;
175 u32 status, fb_divider, temp, ref_divider;
176
177 VERB("parent_rate=%lu", parent_rate);
178
179 status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
180
181 if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
182 fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
183 fb_divider &= 0xff;
184 temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
185 fb_divider = (temp << 8) | fb_divider;
186 fb_divider += 1;
187
188 ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
189 ref_divider &= 0x3f;
190 ref_divider += 1;
191
192 /* multiply by 2 */
193 vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
194 } else {
195 vco_rate = 0;
196 }
197
198 DBG("returning vco rate = %lu", vco_rate);
199
200 return vco_rate;
201}
202
203static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
204 .round_rate = msm_dsi_pll_helper_clk_round_rate,
205 .set_rate = dsi_pll_28nm_clk_set_rate,
206 .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
207 .prepare = msm_dsi_pll_helper_clk_prepare,
208 .unprepare = msm_dsi_pll_helper_clk_unprepare,
209 .is_enabled = dsi_pll_28nm_clk_is_enabled,
210};
211
212/*
213 * Custom byte clock divier clk_ops
214 *
215 * This clock is the entry point to configuring the PLL. The user (dsi host)
216 * will set this clock's rate to the desired byte clock rate. The VCO lock
217 * frequency is a multiple of the byte clock rate. The multiplication factor
218 * (shown as F in the diagram above) is a function of the byte clock rate.
219 *
220 * This custom divider clock ensures that its parent (VCO) is set to the
221 * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
222 * accordingly
223 */
224#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
225
226static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
227 unsigned long parent_rate)
228{
229 struct clk_bytediv *bytediv = to_clk_bytediv(hw);
230 unsigned int div;
231
232 div = pll_read(bytediv->reg) & 0xff;
233
234 return parent_rate / (div + 1);
235}
236
237/* find multiplication factor(wrt byte clock) at which the VCO should be set */
238static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
239{
240 unsigned long bit_mhz;
241
242 /* convert to bit clock in Mhz */
243 bit_mhz = (byte_clk_rate * 8) / 1000000;
244
245 if (bit_mhz < 125)
246 return 64;
247 else if (bit_mhz < 250)
248 return 32;
249 else if (bit_mhz < 600)
250 return 16;
251 else
252 return 8;
253}
254
255static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
256 unsigned long *prate)
257{
258 unsigned long best_parent;
259 unsigned int factor;
260
261 factor = get_vco_mul_factor(rate);
262
263 best_parent = rate * factor;
264 *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
265
266 return *prate / factor;
267}
268
269static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
270 unsigned long parent_rate)
271{
272 struct clk_bytediv *bytediv = to_clk_bytediv(hw);
273 u32 val;
274 unsigned int factor;
275
276 factor = get_vco_mul_factor(rate);
277
278 val = pll_read(bytediv->reg);
279 val |= (factor - 1) & 0xff;
280 pll_write(bytediv->reg, val);
281
282 return 0;
283}
284
285/* Our special byte clock divider ops */
286static const struct clk_ops clk_bytediv_ops = {
287 .round_rate = clk_bytediv_round_rate,
288 .set_rate = clk_bytediv_set_rate,
289 .recalc_rate = clk_bytediv_recalc_rate,
290};
291
292/*
293 * PLL Callbacks
294 */
295static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
296{
297 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
298 struct device *dev = &pll_28nm->pdev->dev;
299 void __iomem *base = pll_28nm->mmio;
300 bool locked;
301 unsigned int bit_div, byte_div;
302 int max_reads = 1000, timeout_us = 100;
303 u32 val;
304
305 DBG("id=%d", pll_28nm->id);
306
307 /*
308 * before enabling the PLL, configure the bit clock divider since we
309 * don't expose it as a clock to the outside world
310 * 1: read back the byte clock divider that should already be set
311 * 2: divide by 8 to get bit clock divider
312 * 3: write it to POSTDIV1
313 */
314 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
315 byte_div = val + 1;
316 bit_div = byte_div / 8;
317
318 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
319 val &= ~0xf;
320 val |= (bit_div - 1);
321 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
322
323 /* enable the PLL */
324 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
325 DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
326
327 locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
328
329 if (unlikely(!locked))
330 dev_err(dev, "DSI PLL lock failed\n");
331 else
332 DBG("DSI PLL lock success");
333
334 return locked ? 0 : -EINVAL;
335}
336
337static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
338{
339 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
340
341 DBG("id=%d", pll_28nm->id);
342 pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
343}
344
345static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
346{
347 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
348 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
349 void __iomem *base = pll_28nm->mmio;
350
351 cached_state->postdiv3 =
352 pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
353 cached_state->postdiv2 =
354 pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
355 cached_state->postdiv1 =
356 pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
357
358 cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
359}
360
361static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
362{
363 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
364 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
365 void __iomem *base = pll_28nm->mmio;
366 int ret;
367
368 ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
369 cached_state->vco_rate, 0);
370 if (ret) {
371 dev_err(&pll_28nm->pdev->dev,
372 "restore vco rate failed. ret=%d\n", ret);
373 return ret;
374 }
375
376 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
377 cached_state->postdiv3);
378 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
379 cached_state->postdiv2);
380 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
381 cached_state->postdiv1);
382
383 return 0;
384}
385
386static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
387 struct clk **byte_clk_provider,
388 struct clk **pixel_clk_provider)
389{
390 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
391
392 if (byte_clk_provider)
393 *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
394 if (pixel_clk_provider)
395 *pixel_clk_provider =
396 pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
397
398 return 0;
399}
400
401static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
402{
403 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
404
405 msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
406 pll_28nm->clks, pll_28nm->num_clks);
407}
408
409static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
410{
411 char *clk_name, *parent_name, *vco_name;
412 struct clk_init_data vco_init = {
413 .parent_names = (const char *[]){ "pxo" },
414 .num_parents = 1,
415 .ops = &clk_ops_dsi_pll_28nm_vco,
416 };
417 struct device *dev = &pll_28nm->pdev->dev;
418 struct clk **clks = pll_28nm->clks;
419 struct clk **provided_clks = pll_28nm->provided_clks;
420 struct clk_bytediv *bytediv;
421 struct clk_init_data bytediv_init = { };
422 int ret, num = 0;
423
424 DBG("%d", pll_28nm->id);
425
426 bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
427 if (!bytediv)
428 return -ENOMEM;
429
430 vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
431 if (!vco_name)
432 return -ENOMEM;
433
434 parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
435 if (!parent_name)
436 return -ENOMEM;
437
438 clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
439 if (!clk_name)
440 return -ENOMEM;
441
442 pll_28nm->bytediv = bytediv;
443
444 snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
445 vco_init.name = vco_name;
446
447 pll_28nm->base.clk_hw.init = &vco_init;
448
449 clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
450
451 /* prepare and register bytediv */
452 bytediv->hw.init = &bytediv_init;
453 bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
454
455 snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
456 snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
457
458 bytediv_init.name = clk_name;
459 bytediv_init.ops = &clk_bytediv_ops;
460 bytediv_init.flags = CLK_SET_RATE_PARENT;
461 bytediv_init.parent_names = (const char * const *) &parent_name;
462 bytediv_init.num_parents = 1;
463
464 /* DIV2 */
465 clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
466 clk_register(dev, &bytediv->hw);
467
468 snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
469 /* DIV3 */
470 clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
471 clk_register_divider(dev, clk_name,
472 parent_name, 0, pll_28nm->mmio +
473 REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
474 0, 8, 0, NULL);
475
476 pll_28nm->num_clks = num;
477
478 pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
479 pll_28nm->clk_data.clks = provided_clks;
480
481 ret = of_clk_add_provider(dev->of_node,
482 of_clk_src_onecell_get, &pll_28nm->clk_data);
483 if (ret) {
484 dev_err(dev, "failed to register clk provider: %d\n", ret);
485 return ret;
486 }
487
488 return 0;
489}
490
491struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
492 int id)
493{
494 struct dsi_pll_28nm *pll_28nm;
495 struct msm_dsi_pll *pll;
496 int ret;
497
498 if (!pdev)
499 return ERR_PTR(-ENODEV);
500
501 pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
502 if (!pll_28nm)
503 return ERR_PTR(-ENOMEM);
504
505 pll_28nm->pdev = pdev;
506 pll_28nm->id = id + 1;
507
508 pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
509 if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
510 dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
511 return ERR_PTR(-ENOMEM);
512 }
513
514 pll = &pll_28nm->base;
515 pll->min_rate = VCO_MIN_RATE;
516 pll->max_rate = VCO_MAX_RATE;
517 pll->get_provider = dsi_pll_28nm_get_provider;
518 pll->destroy = dsi_pll_28nm_destroy;
519 pll->disable_seq = dsi_pll_28nm_disable_seq;
520 pll->save_state = dsi_pll_28nm_save_state;
521 pll->restore_state = dsi_pll_28nm_restore_state;
522
523 pll->en_seq_cnt = 1;
524 pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
525
526 ret = pll_28nm_register(pll_28nm);
527 if (ret) {
528 dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
529 return ERR_PTR(ret);
530 }
531
532 return pll;
533}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1f4a95eeb348..9a0989c0b4de 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -17,6 +17,8 @@
17 */ 17 */
18 18
19#include <linux/of_irq.h> 19#include <linux/of_irq.h>
20#include <linux/of_gpio.h>
21
20#include "hdmi.h" 22#include "hdmi.h"
21 23
22void hdmi_set_mode(struct hdmi *hdmi, bool power_on) 24void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -322,8 +324,6 @@ fail:
322 * The hdmi device: 324 * The hdmi device:
323 */ 325 */
324 326
325#include <linux/of_gpio.h>
326
327#define HDMI_CFG(item, entry) \ 327#define HDMI_CFG(item, entry) \
328 .item ## _names = item ##_names_ ## entry, \ 328 .item ## _names = item ##_names_ ## entry, \
329 .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry) 329 .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
@@ -388,17 +388,6 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
388 .hpd_freq = hpd_clk_freq_8x74, 388 .hpd_freq = hpd_clk_freq_8x74,
389}; 389};
390 390
391static const struct of_device_id dt_match[] = {
392 { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
393 { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
394 { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
395 { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
396 { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
397 { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
398 {}
399};
400
401#ifdef CONFIG_OF
402static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) 391static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
403{ 392{
404 int gpio = of_get_named_gpio(of_node, name, 0); 393 int gpio = of_get_named_gpio(of_node, name, 0);
@@ -413,7 +402,6 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
413 } 402 }
414 return gpio; 403 return gpio;
415} 404}
416#endif
417 405
418static int hdmi_bind(struct device *dev, struct device *master, void *data) 406static int hdmi_bind(struct device *dev, struct device *master, void *data)
419{ 407{
@@ -421,16 +409,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
421 struct msm_drm_private *priv = drm->dev_private; 409 struct msm_drm_private *priv = drm->dev_private;
422 static struct hdmi_platform_config *hdmi_cfg; 410 static struct hdmi_platform_config *hdmi_cfg;
423 struct hdmi *hdmi; 411 struct hdmi *hdmi;
424#ifdef CONFIG_OF
425 struct device_node *of_node = dev->of_node; 412 struct device_node *of_node = dev->of_node;
426 const struct of_device_id *match;
427 413
428 match = of_match_node(dt_match, of_node); 414 hdmi_cfg = (struct hdmi_platform_config *)
429 if (match && match->data) { 415 of_device_get_match_data(dev);
430 hdmi_cfg = (struct hdmi_platform_config *)match->data; 416 if (!hdmi_cfg) {
431 DBG("hdmi phy: %s", match->compatible); 417 dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
432 } else {
433 dev_err(dev, "unknown phy: %s\n", of_node->name);
434 return -ENXIO; 418 return -ENXIO;
435 } 419 }
436 420
@@ -443,55 +427,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
443 hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); 427 hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
444 hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); 428 hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
445 429
446#else
447 static struct hdmi_platform_config config = {};
448 static const char *hpd_clk_names[] = {
449 "core_clk", "master_iface_clk", "slave_iface_clk",
450 };
451 if (cpu_is_apq8064()) {
452 static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
453 config.phy_init = hdmi_phy_8960_init;
454 config.hpd_reg_names = hpd_reg_names;
455 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
456 config.hpd_clk_names = hpd_clk_names;
457 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
458 config.ddc_clk_gpio = 70;
459 config.ddc_data_gpio = 71;
460 config.hpd_gpio = 72;
461 config.mux_en_gpio = -1;
462 config.mux_sel_gpio = -1;
463 } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
464 static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
465 config.phy_init = hdmi_phy_8960_init;
466 config.hpd_reg_names = hpd_reg_names;
467 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
468 config.hpd_clk_names = hpd_clk_names;
469 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
470 config.ddc_clk_gpio = 100;
471 config.ddc_data_gpio = 101;
472 config.hpd_gpio = 102;
473 config.mux_en_gpio = -1;
474 config.mux_sel_gpio = -1;
475 } else if (cpu_is_msm8x60()) {
476 static const char *hpd_reg_names[] = {
477 "8901_hdmi_mvs", "8901_mpp0"
478 };
479 config.phy_init = hdmi_phy_8x60_init;
480 config.hpd_reg_names = hpd_reg_names;
481 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
482 config.hpd_clk_names = hpd_clk_names;
483 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
484 config.ddc_clk_gpio = 170;
485 config.ddc_data_gpio = 171;
486 config.hpd_gpio = 172;
487 config.mux_en_gpio = -1;
488 config.mux_sel_gpio = -1;
489 }
490 config.mmio_name = "hdmi_msm_hdmi_addr";
491 config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
492
493 hdmi_cfg = &config;
494#endif
495 dev->platform_data = hdmi_cfg; 430 dev->platform_data = hdmi_cfg;
496 431
497 hdmi = hdmi_init(to_platform_device(dev)); 432 hdmi = hdmi_init(to_platform_device(dev));
@@ -529,6 +464,16 @@ static int hdmi_dev_remove(struct platform_device *pdev)
529 return 0; 464 return 0;
530} 465}
531 466
467static const struct of_device_id dt_match[] = {
468 { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
469 { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
470 { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
471 { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
472 { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
473 { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
474 {}
475};
476
532static struct platform_driver hdmi_driver = { 477static struct platform_driver hdmi_driver = {
533 .probe = hdmi_dev_probe, 478 .probe = hdmi_dev_probe,
534 .remove = hdmi_dev_remove, 479 .remove = hdmi_dev_remove,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 6ac9aa165768..28df397c3b04 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -678,7 +678,8 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
678 drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 678 drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
679 "unref cursor", unref_cursor_worker); 679 "unref cursor", unref_cursor_worker);
680 680
681 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs); 681 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
682 NULL);
682 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 683 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
683 plane->crtc = crtc; 684 plane->crtc = crtc;
684 685
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
new file mode 100644
index 000000000000..2f57e9453b67
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 * Copyright (c) 2014, Inforce Computing. All rights reserved.
4 *
5 * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "mdp4_kms.h"
21
22#include "drm_crtc.h"
23#include "drm_crtc_helper.h"
24
25struct mdp4_dsi_encoder {
26 struct drm_encoder base;
27 struct drm_panel *panel;
28 bool enabled;
29};
30#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base)
31
32static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
33{
34 struct msm_drm_private *priv = encoder->dev->dev_private;
35 return to_mdp4_kms(to_mdp_kms(priv->kms));
36}
37
38static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
39{
40 struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
41
42 drm_encoder_cleanup(encoder);
43 kfree(mdp4_dsi_encoder);
44}
45
46static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
47 .destroy = mdp4_dsi_encoder_destroy,
48};
49
50static bool mdp4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
51 const struct drm_display_mode *mode,
52 struct drm_display_mode *adjusted_mode)
53{
54 return true;
55}
56
57static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
58 struct drm_display_mode *mode,
59 struct drm_display_mode *adjusted_mode)
60{
61 struct mdp4_kms *mdp4_kms = get_kms(encoder);
62 uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol;
63 uint32_t display_v_start, display_v_end;
64 uint32_t hsync_start_x, hsync_end_x;
65
66 mode = adjusted_mode;
67
68 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
69 mode->base.id, mode->name,
70 mode->vrefresh, mode->clock,
71 mode->hdisplay, mode->hsync_start,
72 mode->hsync_end, mode->htotal,
73 mode->vdisplay, mode->vsync_start,
74 mode->vsync_end, mode->vtotal,
75 mode->type, mode->flags);
76
77 ctrl_pol = 0;
78 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
79 ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW;
80 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
81 ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW;
82 /* probably need to get DATA_EN polarity from panel.. */
83
84 dsi_hsync_skew = 0; /* get this from panel? */
85
86 hsync_start_x = (mode->htotal - mode->hsync_start);
87 hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
88
89 vsync_period = mode->vtotal * mode->htotal;
90 vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
91 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew;
92 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1;
93
94 mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL,
95 MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
96 MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal));
97 mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period);
98 mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len);
99 mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL,
100 MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) |
101 MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x));
102 mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start);
103 mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end);
104
105 mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol);
106 mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR,
107 MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY |
108 MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff));
109 mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL,
110 MDP4_DSI_ACTIVE_HCTL_START(0) |
111 MDP4_DSI_ACTIVE_HCTL_END(0));
112 mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew);
113 mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0);
114 mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0);
115 mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0);
116}
117
118static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder)
119{
120 struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
121 struct mdp4_kms *mdp4_kms = get_kms(encoder);
122
123 if (!mdp4_dsi_encoder->enabled)
124 return;
125
126 mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
127
128 /*
129 * Wait for a vsync so we know the ENABLE=0 latched before
130 * the (connector) source of the vsync's gets disabled,
131 * otherwise we end up in a funny state if we re-enable
132 * before the disable latches, which results that some of
133 * the settings changes for the new modeset (like new
134 * scanout buffer) don't latch properly..
135 */
136 mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
137
138 mdp4_dsi_encoder->enabled = false;
139}
140
141static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
142{
143 struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
144 struct mdp4_kms *mdp4_kms = get_kms(encoder);
145
146 if (mdp4_dsi_encoder->enabled)
147 return;
148
149 mdp4_crtc_set_config(encoder->crtc,
150 MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
151 MDP4_DMA_CONFIG_DEFLKR_EN |
152 MDP4_DMA_CONFIG_DITHER_EN |
153 MDP4_DMA_CONFIG_R_BPC(BPC8) |
154 MDP4_DMA_CONFIG_G_BPC(BPC8) |
155 MDP4_DMA_CONFIG_B_BPC(BPC8) |
156 MDP4_DMA_CONFIG_PACK(0x21));
157
158 mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0);
159
160 mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1);
161
162 mdp4_dsi_encoder->enabled = true;
163}
164
165static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
166 .mode_fixup = mdp4_dsi_encoder_mode_fixup,
167 .mode_set = mdp4_dsi_encoder_mode_set,
168 .disable = mdp4_dsi_encoder_disable,
169 .enable = mdp4_dsi_encoder_enable,
170};
171
172/* initialize encoder */
173struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
174{
175 struct drm_encoder *encoder = NULL;
176 struct mdp4_dsi_encoder *mdp4_dsi_encoder;
177 int ret;
178
179 mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
180 if (!mdp4_dsi_encoder) {
181 ret = -ENOMEM;
182 goto fail;
183 }
184
185 encoder = &mdp4_dsi_encoder->base;
186
187 drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
188 DRM_MODE_ENCODER_DSI, NULL);
189 drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
190
191 return encoder;
192
193fail:
194 if (encoder)
195 mdp4_dsi_encoder_destroy(encoder);
196
197 return ERR_PTR(ret);
198}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 89614c6a6c1b..a21df54cb50f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -262,7 +262,7 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
262 encoder = &mdp4_dtv_encoder->base; 262 encoder = &mdp4_dtv_encoder->base;
263 263
264 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, 264 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
265 DRM_MODE_ENCODER_TMDS); 265 DRM_MODE_ENCODER_TMDS, NULL);
266 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); 266 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
267 267
268 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk"); 268 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 5ed38cf548a1..a521207db8a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -29,7 +29,7 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
29 29
30static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 30static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
31{ 31{
32 DRM_ERROR("errors: %08x\n", irqstatus); 32 DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
33} 33}
34 34
35void mdp4_irq_preinstall(struct msm_kms *kms) 35void mdp4_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 077f7521a971..5a8e3d6bcbff 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -169,7 +169,14 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
169 struct drm_encoder *encoder) 169 struct drm_encoder *encoder)
170{ 170{
171 /* if we had >1 encoder, we'd need something more clever: */ 171 /* if we had >1 encoder, we'd need something more clever: */
172 return mdp4_dtv_round_pixclk(encoder, rate); 172 switch (encoder->encoder_type) {
173 case DRM_MODE_ENCODER_TMDS:
174 return mdp4_dtv_round_pixclk(encoder, rate);
175 case DRM_MODE_ENCODER_LVDS:
176 case DRM_MODE_ENCODER_DSI:
177 default:
178 return rate;
179 }
173} 180}
174 181
175static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) 182static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
@@ -240,19 +247,18 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
240 return 0; 247 return 0;
241} 248}
242 249
243#ifdef CONFIG_OF 250static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
244static struct drm_panel *detect_panel(struct drm_device *dev)
245{ 251{
246 struct device_node *endpoint, *panel_node; 252 struct device_node *endpoint, *panel_node;
247 struct device_node *np = dev->dev->of_node; 253 struct device_node *np = dev->dev->of_node;
248 struct drm_panel *panel = NULL;
249 254
250 endpoint = of_graph_get_next_endpoint(np, NULL); 255 endpoint = of_graph_get_next_endpoint(np, NULL);
251 if (!endpoint) { 256 if (!endpoint) {
252 dev_err(dev->dev, "no valid endpoint\n"); 257 DBG("no endpoint in MDP4 to fetch LVDS panel\n");
253 return ERR_PTR(-ENODEV); 258 return NULL;
254 } 259 }
255 260
261 /* don't proceed if we have an endpoint but no panel_node tied to it */
256 panel_node = of_graph_get_remote_port_parent(endpoint); 262 panel_node = of_graph_get_remote_port_parent(endpoint);
257 if (!panel_node) { 263 if (!panel_node) {
258 dev_err(dev->dev, "no valid panel node\n"); 264 dev_err(dev->dev, "no valid panel node\n");
@@ -262,132 +268,185 @@ static struct drm_panel *detect_panel(struct drm_device *dev)
262 268
263 of_node_put(endpoint); 269 of_node_put(endpoint);
264 270
265 panel = of_drm_find_panel(panel_node); 271 return panel_node;
266 if (!panel) {
267 of_node_put(panel_node);
268 return ERR_PTR(-EPROBE_DEFER);
269 }
270
271 return panel;
272} 272}
273#else
274static struct drm_panel *detect_panel(struct drm_device *dev)
275{
276 // ??? maybe use a module param to specify which panel is attached?
277}
278#endif
279 273
280static int modeset_init(struct mdp4_kms *mdp4_kms) 274static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
275 int intf_type)
281{ 276{
282 struct drm_device *dev = mdp4_kms->dev; 277 struct drm_device *dev = mdp4_kms->dev;
283 struct msm_drm_private *priv = dev->dev_private; 278 struct msm_drm_private *priv = dev->dev_private;
284 struct drm_plane *plane;
285 struct drm_crtc *crtc;
286 struct drm_encoder *encoder; 279 struct drm_encoder *encoder;
287 struct drm_connector *connector; 280 struct drm_connector *connector;
288 struct drm_panel *panel; 281 struct device_node *panel_node;
282 struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
283 int i, dsi_id;
289 int ret; 284 int ret;
290 285
291 /* construct non-private planes: */ 286 switch (intf_type) {
292 plane = mdp4_plane_init(dev, VG1, false); 287 case DRM_MODE_ENCODER_LVDS:
293 if (IS_ERR(plane)) { 288 /*
294 dev_err(dev->dev, "failed to construct plane for VG1\n"); 289 * bail out early if:
295 ret = PTR_ERR(plane); 290 * - there is no panel node (no need to initialize lcdc
296 goto fail; 291 * encoder and lvds connector), or
297 } 292 * - panel node is a bad pointer
298 priv->planes[priv->num_planes++] = plane; 293 */
294 panel_node = mdp4_detect_lcdc_panel(dev);
295 if (IS_ERR_OR_NULL(panel_node))
296 return PTR_ERR(panel_node);
297
298 encoder = mdp4_lcdc_encoder_init(dev, panel_node);
299 if (IS_ERR(encoder)) {
300 dev_err(dev->dev, "failed to construct LCDC encoder\n");
301 return PTR_ERR(encoder);
302 }
299 303
300 plane = mdp4_plane_init(dev, VG2, false); 304 /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
301 if (IS_ERR(plane)) { 305 encoder->possible_crtcs = 1 << DMA_P;
302 dev_err(dev->dev, "failed to construct plane for VG2\n");
303 ret = PTR_ERR(plane);
304 goto fail;
305 }
306 priv->planes[priv->num_planes++] = plane;
307 306
308 /* 307 connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
309 * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS: 308 if (IS_ERR(connector)) {
310 */ 309 dev_err(dev->dev, "failed to initialize LVDS connector\n");
310 return PTR_ERR(connector);
311 }
311 312
312 panel = detect_panel(dev); 313 priv->encoders[priv->num_encoders++] = encoder;
313 if (IS_ERR(panel)) { 314 priv->connectors[priv->num_connectors++] = connector;
314 ret = PTR_ERR(panel);
315 dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
316 goto fail;
317 }
318 315
319 plane = mdp4_plane_init(dev, RGB2, true); 316 break;
320 if (IS_ERR(plane)) { 317 case DRM_MODE_ENCODER_TMDS:
321 dev_err(dev->dev, "failed to construct plane for RGB2\n"); 318 encoder = mdp4_dtv_encoder_init(dev);
322 ret = PTR_ERR(plane); 319 if (IS_ERR(encoder)) {
323 goto fail; 320 dev_err(dev->dev, "failed to construct DTV encoder\n");
324 } 321 return PTR_ERR(encoder);
322 }
325 323
326 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P); 324 /* DTV can be hooked to DMA_E: */
327 if (IS_ERR(crtc)) { 325 encoder->possible_crtcs = 1 << 1;
328 dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
329 ret = PTR_ERR(crtc);
330 goto fail;
331 }
332 326
333 encoder = mdp4_lcdc_encoder_init(dev, panel); 327 if (priv->hdmi) {
334 if (IS_ERR(encoder)) { 328 /* Construct bridge/connector for HDMI: */
335 dev_err(dev->dev, "failed to construct LCDC encoder\n"); 329 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
336 ret = PTR_ERR(encoder); 330 if (ret) {
337 goto fail; 331 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
338 } 332 return ret;
333 }
334 }
339 335
340 /* LCDC can be hooked to DMA_P: */ 336 priv->encoders[priv->num_encoders++] = encoder;
341 encoder->possible_crtcs = 1 << priv->num_crtcs;
342 337
343 priv->crtcs[priv->num_crtcs++] = crtc; 338 break;
344 priv->encoders[priv->num_encoders++] = encoder; 339 case DRM_MODE_ENCODER_DSI:
340 /* only DSI1 supported for now */
341 dsi_id = 0;
345 342
346 connector = mdp4_lvds_connector_init(dev, panel, encoder); 343 if (!priv->dsi[dsi_id])
347 if (IS_ERR(connector)) { 344 break;
348 ret = PTR_ERR(connector);
349 dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
350 goto fail;
351 }
352 345
353 priv->connectors[priv->num_connectors++] = connector; 346 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
347 dsi_encs[i] = mdp4_dsi_encoder_init(dev);
348 if (IS_ERR(dsi_encs[i])) {
349 ret = PTR_ERR(dsi_encs[i]);
350 dev_err(dev->dev,
351 "failed to construct DSI encoder: %d\n",
352 ret);
353 return ret;
354 }
354 355
355 /* 356 /* TODO: Add DMA_S later? */
356 * Setup DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI: 357 dsi_encs[i]->possible_crtcs = 1 << DMA_P;
357 */ 358 priv->encoders[priv->num_encoders++] = dsi_encs[i];
359 }
358 360
359 plane = mdp4_plane_init(dev, RGB1, true); 361 ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
360 if (IS_ERR(plane)) { 362 if (ret) {
361 dev_err(dev->dev, "failed to construct plane for RGB1\n"); 363 dev_err(dev->dev, "failed to initialize DSI: %d\n",
362 ret = PTR_ERR(plane); 364 ret);
363 goto fail; 365 return ret;
364 } 366 }
365 367
366 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E); 368 break;
367 if (IS_ERR(crtc)) { 369 default:
368 dev_err(dev->dev, "failed to construct crtc for DMA_E\n"); 370 dev_err(dev->dev, "Invalid or unsupported interface\n");
369 ret = PTR_ERR(crtc); 371 return -EINVAL;
370 goto fail;
371 } 372 }
372 373
373 encoder = mdp4_dtv_encoder_init(dev); 374 return 0;
374 if (IS_ERR(encoder)) { 375}
375 dev_err(dev->dev, "failed to construct DTV encoder\n"); 376
376 ret = PTR_ERR(encoder); 377static int modeset_init(struct mdp4_kms *mdp4_kms)
377 goto fail; 378{
379 struct drm_device *dev = mdp4_kms->dev;
380 struct msm_drm_private *priv = dev->dev_private;
381 struct drm_plane *plane;
382 struct drm_crtc *crtc;
383 int i, ret;
384 static const enum mdp4_pipe rgb_planes[] = {
385 RGB1, RGB2,
386 };
387 static const enum mdp4_pipe vg_planes[] = {
388 VG1, VG2,
389 };
390 static const enum mdp4_dma mdp4_crtcs[] = {
391 DMA_P, DMA_E,
392 };
393 static const char * const mdp4_crtc_names[] = {
394 "DMA_P", "DMA_E",
395 };
396 static const int mdp4_intfs[] = {
397 DRM_MODE_ENCODER_LVDS,
398 DRM_MODE_ENCODER_DSI,
399 DRM_MODE_ENCODER_TMDS,
400 };
401
402 /* construct non-private planes: */
403 for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
404 plane = mdp4_plane_init(dev, vg_planes[i], false);
405 if (IS_ERR(plane)) {
406 dev_err(dev->dev,
407 "failed to construct plane for VG%d\n", i + 1);
408 ret = PTR_ERR(plane);
409 goto fail;
410 }
411 priv->planes[priv->num_planes++] = plane;
378 } 412 }
379 413
380 /* DTV can be hooked to DMA_E: */ 414 for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
381 encoder->possible_crtcs = 1 << priv->num_crtcs; 415 plane = mdp4_plane_init(dev, rgb_planes[i], true);
416 if (IS_ERR(plane)) {
417 dev_err(dev->dev,
418 "failed to construct plane for RGB%d\n", i + 1);
419 ret = PTR_ERR(plane);
420 goto fail;
421 }
422
423 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
424 mdp4_crtcs[i]);
425 if (IS_ERR(crtc)) {
426 dev_err(dev->dev, "failed to construct crtc for %s\n",
427 mdp4_crtc_names[i]);
428 ret = PTR_ERR(crtc);
429 goto fail;
430 }
431
432 priv->crtcs[priv->num_crtcs++] = crtc;
433 }
382 434
383 priv->crtcs[priv->num_crtcs++] = crtc; 435 /*
384 priv->encoders[priv->num_encoders++] = encoder; 436 * we currently set up two relatively fixed paths:
437 *
438 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
439 * or
440 * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
441 *
442 * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
443 */
385 444
386 if (priv->hdmi) { 445 for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
387 /* Construct bridge/connector for HDMI: */ 446 ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
388 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
389 if (ret) { 447 if (ret) {
390 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); 448 dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
449 i, ret);
391 goto fail; 450 goto fail;
392 } 451 }
393 } 452 }
@@ -558,17 +617,10 @@ fail:
558static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) 617static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
559{ 618{
560 static struct mdp4_platform_config config = {}; 619 static struct mdp4_platform_config config = {};
561#ifdef CONFIG_OF 620
562 /* TODO */ 621 /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
563 config.max_clk = 266667000; 622 config.max_clk = 266667000;
564 config.iommu = iommu_domain_alloc(&platform_bus_type); 623 config.iommu = iommu_domain_alloc(&platform_bus_type);
565#else 624
566 if (cpu_is_apq8064())
567 config.max_clk = 266667000;
568 else
569 config.max_clk = 200000000;
570
571 config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
572#endif
573 return &config; 625 return &config;
574} 626}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..d2c96ef431f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -157,7 +157,7 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
157 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); 157 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
158 break; 158 break;
159 default: 159 default:
160 WARN_ON("invalid pipe"); 160 WARN(1, "invalid pipe");
161 break; 161 break;
162 } 162 }
163 163
@@ -212,10 +212,19 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
212 212
213long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate); 213long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
214struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, 214struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
215 struct drm_panel *panel); 215 struct device_node *panel_node);
216 216
217struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, 217struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
218 struct drm_panel *panel, struct drm_encoder *encoder); 218 struct device_node *panel_node, struct drm_encoder *encoder);
219
220#ifdef CONFIG_DRM_MSM_DSI
221struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
222#else
223static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
224{
225 return ERR_PTR(-ENODEV);
226}
227#endif
219 228
220#ifdef CONFIG_COMMON_CLK 229#ifdef CONFIG_COMMON_CLK
221struct clk *mpd4_lvds_pll_init(struct drm_device *dev); 230struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index 4cd6e721aa0a..cd63fedb67cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -23,6 +23,7 @@
23 23
24struct mdp4_lcdc_encoder { 24struct mdp4_lcdc_encoder {
25 struct drm_encoder base; 25 struct drm_encoder base;
26 struct device_node *panel_node;
26 struct drm_panel *panel; 27 struct drm_panel *panel;
27 struct clk *lcdc_clk; 28 struct clk *lcdc_clk;
28 unsigned long int pixclock; 29 unsigned long int pixclock;
@@ -338,7 +339,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
338 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = 339 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
339 to_mdp4_lcdc_encoder(encoder); 340 to_mdp4_lcdc_encoder(encoder);
340 struct mdp4_kms *mdp4_kms = get_kms(encoder); 341 struct mdp4_kms *mdp4_kms = get_kms(encoder);
341 struct drm_panel *panel = mdp4_lcdc_encoder->panel; 342 struct drm_panel *panel;
342 int i, ret; 343 int i, ret;
343 344
344 if (WARN_ON(!mdp4_lcdc_encoder->enabled)) 345 if (WARN_ON(!mdp4_lcdc_encoder->enabled))
@@ -346,6 +347,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
346 347
347 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); 348 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
348 349
350 panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
349 if (panel) { 351 if (panel) {
350 drm_panel_disable(panel); 352 drm_panel_disable(panel);
351 drm_panel_unprepare(panel); 353 drm_panel_unprepare(panel);
@@ -381,7 +383,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
381 to_mdp4_lcdc_encoder(encoder); 383 to_mdp4_lcdc_encoder(encoder);
382 unsigned long pc = mdp4_lcdc_encoder->pixclock; 384 unsigned long pc = mdp4_lcdc_encoder->pixclock;
383 struct mdp4_kms *mdp4_kms = get_kms(encoder); 385 struct mdp4_kms *mdp4_kms = get_kms(encoder);
384 struct drm_panel *panel = mdp4_lcdc_encoder->panel; 386 struct drm_panel *panel;
385 int i, ret; 387 int i, ret;
386 388
387 if (WARN_ON(mdp4_lcdc_encoder->enabled)) 389 if (WARN_ON(mdp4_lcdc_encoder->enabled))
@@ -414,6 +416,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
414 if (ret) 416 if (ret)
415 dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); 417 dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
416 418
419 panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
417 if (panel) { 420 if (panel) {
418 drm_panel_prepare(panel); 421 drm_panel_prepare(panel);
419 drm_panel_enable(panel); 422 drm_panel_enable(panel);
@@ -442,7 +445,7 @@ long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
442 445
443/* initialize encoder */ 446/* initialize encoder */
444struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, 447struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
445 struct drm_panel *panel) 448 struct device_node *panel_node)
446{ 449{
447 struct drm_encoder *encoder = NULL; 450 struct drm_encoder *encoder = NULL;
448 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder; 451 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
@@ -455,12 +458,12 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
455 goto fail; 458 goto fail;
456 } 459 }
457 460
458 mdp4_lcdc_encoder->panel = panel; 461 mdp4_lcdc_encoder->panel_node = panel_node;
459 462
460 encoder = &mdp4_lcdc_encoder->base; 463 encoder = &mdp4_lcdc_encoder->base;
461 464
462 drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs, 465 drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
463 DRM_MODE_ENCODER_LVDS); 466 DRM_MODE_ENCODER_LVDS, NULL);
464 drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); 467 drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
465 468
466 /* TODO: do we need different pll in other cases? */ 469 /* TODO: do we need different pll in other cases? */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 921185133d38..e73e1742b250 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -23,6 +23,7 @@
23struct mdp4_lvds_connector { 23struct mdp4_lvds_connector {
24 struct drm_connector base; 24 struct drm_connector base;
25 struct drm_encoder *encoder; 25 struct drm_encoder *encoder;
26 struct device_node *panel_node;
26 struct drm_panel *panel; 27 struct drm_panel *panel;
27}; 28};
28#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base) 29#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
@@ -33,6 +34,10 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
33 struct mdp4_lvds_connector *mdp4_lvds_connector = 34 struct mdp4_lvds_connector *mdp4_lvds_connector =
34 to_mdp4_lvds_connector(connector); 35 to_mdp4_lvds_connector(connector);
35 36
37 if (!mdp4_lvds_connector->panel)
38 mdp4_lvds_connector->panel =
39 of_drm_find_panel(mdp4_lvds_connector->panel_node);
40
36 return mdp4_lvds_connector->panel ? 41 return mdp4_lvds_connector->panel ?
37 connector_status_connected : 42 connector_status_connected :
38 connector_status_disconnected; 43 connector_status_disconnected;
@@ -42,10 +47,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
42{ 47{
43 struct mdp4_lvds_connector *mdp4_lvds_connector = 48 struct mdp4_lvds_connector *mdp4_lvds_connector =
44 to_mdp4_lvds_connector(connector); 49 to_mdp4_lvds_connector(connector);
45 struct drm_panel *panel = mdp4_lvds_connector->panel;
46
47 if (panel)
48 drm_panel_detach(panel);
49 50
50 drm_connector_unregister(connector); 51 drm_connector_unregister(connector);
51 drm_connector_cleanup(connector); 52 drm_connector_cleanup(connector);
@@ -60,9 +61,14 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
60 struct drm_panel *panel = mdp4_lvds_connector->panel; 61 struct drm_panel *panel = mdp4_lvds_connector->panel;
61 int ret = 0; 62 int ret = 0;
62 63
63 if (panel) 64 if (panel) {
65 drm_panel_attach(panel, connector);
66
64 ret = panel->funcs->get_modes(panel); 67 ret = panel->funcs->get_modes(panel);
65 68
69 drm_panel_detach(panel);
70 }
71
66 return ret; 72 return ret;
67} 73}
68 74
@@ -111,7 +117,7 @@ static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs
111 117
112/* initialize connector */ 118/* initialize connector */
113struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, 119struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
114 struct drm_panel *panel, struct drm_encoder *encoder) 120 struct device_node *panel_node, struct drm_encoder *encoder)
115{ 121{
116 struct drm_connector *connector = NULL; 122 struct drm_connector *connector = NULL;
117 struct mdp4_lvds_connector *mdp4_lvds_connector; 123 struct mdp4_lvds_connector *mdp4_lvds_connector;
@@ -124,7 +130,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
124 } 130 }
125 131
126 mdp4_lvds_connector->encoder = encoder; 132 mdp4_lvds_connector->encoder = encoder;
127 mdp4_lvds_connector->panel = panel; 133 mdp4_lvds_connector->panel_node = panel_node;
128 134
129 connector = &mdp4_lvds_connector->base; 135 connector = &mdp4_lvds_connector->base;
130 136
@@ -141,9 +147,6 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
141 147
142 drm_mode_connector_attach_encoder(connector, encoder); 148 drm_mode_connector_attach_encoder(connector, encoder);
143 149
144 if (panel)
145 drm_panel_attach(panel, connector);
146
147 return connector; 150 return connector;
148 151
149fail: 152fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 30d57e74c42f..9f96dfe67769 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -397,7 +397,8 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
397 397
398 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 398 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
399 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, 399 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
400 mdp4_plane->formats, mdp4_plane->nformats, type); 400 mdp4_plane->formats, mdp4_plane->nformats,
401 type, NULL);
401 if (ret) 402 if (ret)
402 goto fail; 403 goto fail;
403 404
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index bb1225aa2f75..57f73f0c120d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -553,9 +553,7 @@ fail:
553static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev) 553static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
554{ 554{
555 static struct mdp5_cfg_platform config = {}; 555 static struct mdp5_cfg_platform config = {};
556#ifdef CONFIG_OF 556
557 /* TODO */
558#endif
559 config.iommu = iommu_domain_alloc(&platform_bus_type); 557 config.iommu = iommu_domain_alloc(&platform_bus_type);
560 558
561 return &config; 559 return &config;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 8e6c9b598a57..1aa21dba663d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -326,7 +326,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
326 mdp5_cmd_enc->ctl = ctl; 326 mdp5_cmd_enc->ctl = ctl;
327 327
328 drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, 328 drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
329 DRM_MODE_ENCODER_DSI); 329 DRM_MODE_ENCODER_DSI, NULL);
330 330
331 drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs); 331 drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
332 332
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 7f9f4ac88029..20cee5ce4071 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -797,7 +797,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
797 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", 797 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
798 pipe2name(mdp5_plane_pipe(plane)), id); 798 pipe2name(mdp5_plane_pipe(plane)), id);
799 799
800 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); 800 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
801 NULL);
801 802
802 drm_flip_work_init(&mdp5_crtc->unref_cursor_work, 803 drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
803 "unref cursor", unref_cursor_worker); 804 "unref cursor", unref_cursor_worker);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index c9e32b08a7a0..0d737cad03a6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -293,6 +293,24 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
293 .enable = mdp5_encoder_enable, 293 .enable = mdp5_encoder_enable,
294}; 294};
295 295
296int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
297{
298 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
299 struct mdp5_kms *mdp5_kms = get_kms(encoder);
300 int intf = mdp5_encoder->intf.num;
301
302 return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
303}
304
305u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
306{
307 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
308 struct mdp5_kms *mdp5_kms = get_kms(encoder);
309 int intf = mdp5_encoder->intf.num;
310
311 return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
312}
313
296int mdp5_encoder_set_split_display(struct drm_encoder *encoder, 314int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
297 struct drm_encoder *slave_encoder) 315 struct drm_encoder *slave_encoder)
298{ 316{
@@ -354,7 +372,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
354 372
355 spin_lock_init(&mdp5_encoder->intf_lock); 373 spin_lock_init(&mdp5_encoder->intf_lock);
356 374
357 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type); 375 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
358 376
359 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); 377 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
360 378
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index b0d4b53b97f4..73bc3e312fd4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -31,7 +31,7 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
31 31
32static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 32static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
33{ 33{
34 DRM_ERROR("errors: %08x\n", irqstatus); 34 DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
35} 35}
36 36
37void mdp5_irq_preinstall(struct msm_kms *kms) 37void mdp5_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index b532faa8026d..e115318402bd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -468,6 +468,127 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
468 return 0; 468 return 0;
469} 469}
470 470
471static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
472{
473 struct drm_device *dev = crtc->dev;
474 struct drm_encoder *encoder;
475
476 drm_for_each_encoder(encoder, dev)
477 if (encoder->crtc == crtc)
478 return encoder;
479
480 return NULL;
481}
482
483static int mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
484 unsigned int flags, int *vpos, int *hpos,
485 ktime_t *stime, ktime_t *etime,
486 const struct drm_display_mode *mode)
487{
488 struct msm_drm_private *priv = dev->dev_private;
489 struct drm_crtc *crtc;
490 struct drm_encoder *encoder;
491 int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
492 int ret = 0;
493
494 crtc = priv->crtcs[pipe];
495 if (!crtc) {
496 DRM_ERROR("Invalid crtc %d\n", pipe);
497 return 0;
498 }
499
500 encoder = get_encoder_from_crtc(crtc);
501 if (!encoder) {
502 DRM_ERROR("no encoder found for crtc %d\n", pipe);
503 return 0;
504 }
505
506 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
507
508 vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
509 vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
510
511 /*
512 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
513 * the end of VFP. Translate the porch values relative to the line
514 * counter positions.
515 */
516
517 vactive_start = vsw + vbp + 1;
518
519 vactive_end = vactive_start + mode->crtc_vdisplay;
520
521 /* last scan line before VSYNC */
522 vfp_end = mode->crtc_vtotal;
523
524 if (stime)
525 *stime = ktime_get();
526
527 line = mdp5_encoder_get_linecount(encoder);
528
529 if (line < vactive_start) {
530 line -= vactive_start;
531 ret |= DRM_SCANOUTPOS_IN_VBLANK;
532 } else if (line > vactive_end) {
533 line = line - vfp_end - vactive_start;
534 ret |= DRM_SCANOUTPOS_IN_VBLANK;
535 } else {
536 line -= vactive_start;
537 }
538
539 *vpos = line;
540 *hpos = 0;
541
542 if (etime)
543 *etime = ktime_get();
544
545 return ret;
546}
547
548static int mdp5_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
549 int *max_error,
550 struct timeval *vblank_time,
551 unsigned flags)
552{
553 struct msm_drm_private *priv = dev->dev_private;
554 struct drm_crtc *crtc;
555
556 if (pipe < 0 || pipe >= priv->num_crtcs) {
557 DRM_ERROR("Invalid crtc %d\n", pipe);
558 return -EINVAL;
559 }
560
561 crtc = priv->crtcs[pipe];
562 if (!crtc) {
563 DRM_ERROR("Invalid crtc %d\n", pipe);
564 return -EINVAL;
565 }
566
567 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
568 vblank_time, flags,
569 &crtc->mode);
570}
571
572static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
573{
574 struct msm_drm_private *priv = dev->dev_private;
575 struct drm_crtc *crtc;
576 struct drm_encoder *encoder;
577
578 if (pipe < 0 || pipe >= priv->num_crtcs)
579 return 0;
580
581 crtc = priv->crtcs[pipe];
582 if (!crtc)
583 return 0;
584
585 encoder = get_encoder_from_crtc(crtc);
586 if (!encoder)
587 return 0;
588
589 return mdp5_encoder_get_framecount(encoder);
590}
591
471struct msm_kms *mdp5_kms_init(struct drm_device *dev) 592struct msm_kms *mdp5_kms_init(struct drm_device *dev)
472{ 593{
473 struct platform_device *pdev = dev->platformdev; 594 struct platform_device *pdev = dev->platformdev;
@@ -590,6 +711,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
590 !config->hw->intf.base[i]) 711 !config->hw->intf.base[i])
591 continue; 712 continue;
592 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 713 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
714
715 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
593 } 716 }
594 mdp5_disable(mdp5_kms); 717 mdp5_disable(mdp5_kms);
595 mdelay(16); 718 mdelay(16);
@@ -635,6 +758,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
635 dev->mode_config.max_width = config->hw->lm.max_width; 758 dev->mode_config.max_width = config->hw->lm.max_width;
636 dev->mode_config.max_height = config->hw->lm.max_height; 759 dev->mode_config.max_height = config->hw->lm.max_height;
637 760
761 dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
762 dev->driver->get_scanout_position = mdp5_get_scanoutpos;
763 dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
764 dev->max_vblank_count = 0xffffffff;
765 dev->vblank_disable_immediate = true;
766
638 return kms; 767 return kms;
639 768
640fail: 769fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 84f65d415598..00730ba08a60 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -222,6 +222,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
222 struct mdp5_interface *intf, struct mdp5_ctl *ctl); 222 struct mdp5_interface *intf, struct mdp5_ctl *ctl);
223int mdp5_encoder_set_split_display(struct drm_encoder *encoder, 223int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
224 struct drm_encoder *slave_encoder); 224 struct drm_encoder *slave_encoder);
225int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
226u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
225 227
226#ifdef CONFIG_DRM_MSM_DSI 228#ifdef CONFIG_DRM_MSM_DSI
227struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, 229struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 81cd49045ffc..432c09836b0e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -904,7 +904,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
904 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 904 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
905 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, 905 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
906 mdp5_plane->formats, mdp5_plane->nformats, 906 mdp5_plane->formats, mdp5_plane->nformats,
907 type); 907 type, NULL);
908 if (ret) 908 if (ret)
909 goto fail; 909 goto fail;
910 910
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b88ce514eb8e..9a30807b900b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -237,20 +237,9 @@ static int msm_unload(struct drm_device *dev)
237 237
238static int get_mdp_ver(struct platform_device *pdev) 238static int get_mdp_ver(struct platform_device *pdev)
239{ 239{
240#ifdef CONFIG_OF
241 static const struct of_device_id match_types[] = { {
242 .compatible = "qcom,mdss_mdp",
243 .data = (void *)5,
244 }, {
245 /* end node */
246 } };
247 struct device *dev = &pdev->dev; 240 struct device *dev = &pdev->dev;
248 const struct of_device_id *match; 241
249 match = of_match_node(match_types, dev->of_node); 242 return (int) (unsigned long) of_device_get_match_data(dev);
250 if (match)
251 return (int)(unsigned long)match->data;
252#endif
253 return 4;
254} 243}
255 244
256#include <linux/of_address.h> 245#include <linux/of_address.h>
@@ -258,10 +247,10 @@ static int get_mdp_ver(struct platform_device *pdev)
258static int msm_init_vram(struct drm_device *dev) 247static int msm_init_vram(struct drm_device *dev)
259{ 248{
260 struct msm_drm_private *priv = dev->dev_private; 249 struct msm_drm_private *priv = dev->dev_private;
250 struct device_node *node;
261 unsigned long size = 0; 251 unsigned long size = 0;
262 int ret = 0; 252 int ret = 0;
263 253
264#ifdef CONFIG_OF
265 /* In the device-tree world, we could have a 'memory-region' 254 /* In the device-tree world, we could have a 'memory-region'
266 * phandle, which gives us a link to our "vram". Allocating 255 * phandle, which gives us a link to our "vram". Allocating
267 * is all nicely abstracted behind the dma api, but we need 256 * is all nicely abstracted behind the dma api, but we need
@@ -278,7 +267,6 @@ static int msm_init_vram(struct drm_device *dev)
278 * as corruption on screen before we have a chance to 267 * as corruption on screen before we have a chance to
279 * load and do initial modeset) 268 * load and do initial modeset)
280 */ 269 */
281 struct device_node *node;
282 270
283 node = of_parse_phandle(dev->dev->of_node, "memory-region", 0); 271 node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
284 if (node) { 272 if (node) {
@@ -288,14 +276,12 @@ static int msm_init_vram(struct drm_device *dev)
288 return ret; 276 return ret;
289 size = r.end - r.start; 277 size = r.end - r.start;
290 DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); 278 DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
291 } else
292#endif
293 279
294 /* if we have no IOMMU, then we need to use carveout allocator. 280 /* if we have no IOMMU, then we need to use carveout allocator.
295 * Grab the entire CMA chunk carved out in early startup in 281 * Grab the entire CMA chunk carved out in early startup in
296 * mach-msm: 282 * mach-msm:
297 */ 283 */
298 if (!iommu_present(&platform_bus_type)) { 284 } else if (!iommu_present(&platform_bus_type)) {
299 DRM_INFO("using %s VRAM carveout\n", vram); 285 DRM_INFO("using %s VRAM carveout\n", vram);
300 size = memparse(vram, NULL); 286 size = memparse(vram, NULL);
301 } 287 }
@@ -1035,9 +1021,9 @@ static const struct dev_pm_ops msm_pm_ops = {
1035 * Componentized driver support: 1021 * Componentized driver support:
1036 */ 1022 */
1037 1023
1038#ifdef CONFIG_OF 1024/*
1039/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx 1025 * NOTE: duplication of the same code as exynos or imx (or probably any other).
1040 * (or probably any other).. so probably some room for some helpers 1026 * so probably some room for some helpers
1041 */ 1027 */
1042static int compare_of(struct device *dev, void *data) 1028static int compare_of(struct device *dev, void *data)
1043{ 1029{
@@ -1062,12 +1048,6 @@ static int add_components(struct device *dev, struct component_match **matchptr,
1062 1048
1063 return 0; 1049 return 0;
1064} 1050}
1065#else
1066static int compare_dev(struct device *dev, void *data)
1067{
1068 return dev == data;
1069}
1070#endif
1071 1051
1072static int msm_drm_bind(struct device *dev) 1052static int msm_drm_bind(struct device *dev)
1073{ 1053{
@@ -1091,35 +1071,9 @@ static const struct component_master_ops msm_drm_ops = {
1091static int msm_pdev_probe(struct platform_device *pdev) 1071static int msm_pdev_probe(struct platform_device *pdev)
1092{ 1072{
1093 struct component_match *match = NULL; 1073 struct component_match *match = NULL;
1094#ifdef CONFIG_OF 1074
1095 add_components(&pdev->dev, &match, "connectors"); 1075 add_components(&pdev->dev, &match, "connectors");
1096 add_components(&pdev->dev, &match, "gpus"); 1076 add_components(&pdev->dev, &match, "gpus");
1097#else
1098 /* For non-DT case, it kinda sucks. We don't actually have a way
1099 * to know whether or not we are waiting for certain devices (or if
1100 * they are simply not present). But for non-DT we only need to
1101 * care about apq8064/apq8060/etc (all mdp4/a3xx):
1102 */
1103 static const char *devnames[] = {
1104 "hdmi_msm.0", "kgsl-3d0.0",
1105 };
1106 int i;
1107
1108 DBG("Adding components..");
1109
1110 for (i = 0; i < ARRAY_SIZE(devnames); i++) {
1111 struct device *dev;
1112
1113 dev = bus_find_device_by_name(&platform_bus_type,
1114 NULL, devnames[i]);
1115 if (!dev) {
1116 dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
1117 return -EPROBE_DEFER;
1118 }
1119
1120 component_match_add(&pdev->dev, &match, compare_dev, dev);
1121 }
1122#endif
1123 1077
1124 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 1078 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
1125 return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); 1079 return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -1138,8 +1092,10 @@ static const struct platform_device_id msm_id[] = {
1138}; 1092};
1139 1093
1140static const struct of_device_id dt_match[] = { 1094static const struct of_device_id dt_match[] = {
1141 { .compatible = "qcom,mdp" }, /* mdp4 */ 1095 { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */
1142 { .compatible = "qcom,mdss_mdp" }, /* mdp5 */ 1096 { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */
1097 /* to support downstream DT files */
1098 { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
1143 {} 1099 {}
1144}; 1100};
1145MODULE_DEVICE_TABLE(of, dt_match); 1101MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9a713b7a009d..c1e7bba2fdb7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -31,14 +31,9 @@
31#include <linux/iommu.h> 31#include <linux/iommu.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/of_graph.h> 33#include <linux/of_graph.h>
34#include <linux/of_device.h>
34#include <asm/sizes.h> 35#include <asm/sizes.h>
35 36
36#ifndef CONFIG_OF
37#include <mach/board.h>
38#include <mach/socinfo.h>
39#include <mach/iommu_domains.h>
40#endif
41
42#include <drm/drmP.h> 37#include <drm/drmP.h>
43#include <drm/drm_atomic.h> 38#include <drm/drm_atomic.h>
44#include <drm/drm_atomic_helper.h> 39#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3f6ec077b51d..d95af6eba602 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -121,7 +121,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
121 /* note: if fb creation failed, we can't rely on fb destroy 121 /* note: if fb creation failed, we can't rely on fb destroy
122 * to unref the bo: 122 * to unref the bo:
123 */ 123 */
124 drm_gem_object_unreference(fbdev->bo); 124 drm_gem_object_unreference_unlocked(fbdev->bo);
125 ret = PTR_ERR(fb); 125 ret = PTR_ERR(fb);
126 goto fail; 126 goto fail;
127 } 127 }
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index a34b437dbc8f..2527bf4ca5d9 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -24,7 +24,6 @@ nouveau-y += nouveau_hwmon.o
24nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 24nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
25nouveau-y += nouveau_nvif.o 25nouveau-y += nouveau_nvif.o
26nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o 26nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
27nouveau-y += nouveau_sysfs.o
28nouveau-y += nouveau_usif.o # userspace <-> nvif 27nouveau-y += nouveau_usif.o # userspace <-> nvif
29nouveau-y += nouveau_vga.o 28nouveau-y += nouveau_vga.o
30 29
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 3d96b49fe662..6f04397d43a7 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1081,8 +1081,6 @@ nouveau_crtc_set_config(struct drm_mode_set *set)
1081} 1081}
1082 1082
1083static const struct drm_crtc_funcs nv04_crtc_funcs = { 1083static const struct drm_crtc_funcs nv04_crtc_funcs = {
1084 .save = nv_crtc_save,
1085 .restore = nv_crtc_restore,
1086 .cursor_set = nv04_crtc_cursor_set, 1084 .cursor_set = nv04_crtc_cursor_set,
1087 .cursor_move = nv04_crtc_cursor_move, 1085 .cursor_move = nv04_crtc_cursor_move,
1088 .gamma_set = nv_crtc_gamma_set, 1086 .gamma_set = nv_crtc_gamma_set,
@@ -1123,6 +1121,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1123 nv_crtc->index = crtc_num; 1121 nv_crtc->index = crtc_num;
1124 nv_crtc->last_dpms = NV_DPMS_CLEARED; 1122 nv_crtc->last_dpms = NV_DPMS_CLEARED;
1125 1123
1124 nv_crtc->save = nv_crtc_save;
1125 nv_crtc->restore = nv_crtc_restore;
1126
1126 drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs); 1127 drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
1127 drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); 1128 drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
1128 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 1129 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 78cb033bc015..b48eec395f07 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -504,8 +504,6 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
504 504
505static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = { 505static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
506 .dpms = nv04_dac_dpms, 506 .dpms = nv04_dac_dpms,
507 .save = nv04_dac_save,
508 .restore = nv04_dac_restore,
509 .mode_fixup = nv04_dac_mode_fixup, 507 .mode_fixup = nv04_dac_mode_fixup,
510 .prepare = nv04_dac_prepare, 508 .prepare = nv04_dac_prepare,
511 .commit = nv04_dac_commit, 509 .commit = nv04_dac_commit,
@@ -515,8 +513,6 @@ static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
515 513
516static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = { 514static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
517 .dpms = nv04_dac_dpms, 515 .dpms = nv04_dac_dpms,
518 .save = nv04_dac_save,
519 .restore = nv04_dac_restore,
520 .mode_fixup = nv04_dac_mode_fixup, 516 .mode_fixup = nv04_dac_mode_fixup,
521 .prepare = nv04_dac_prepare, 517 .prepare = nv04_dac_prepare,
522 .commit = nv04_dac_commit, 518 .commit = nv04_dac_commit,
@@ -545,12 +541,16 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
545 nv_encoder->dcb = entry; 541 nv_encoder->dcb = entry;
546 nv_encoder->or = ffs(entry->or) - 1; 542 nv_encoder->or = ffs(entry->or) - 1;
547 543
544 nv_encoder->enc_save = nv04_dac_save;
545 nv_encoder->enc_restore = nv04_dac_restore;
546
548 if (nv_gf4_disp_arch(dev)) 547 if (nv_gf4_disp_arch(dev))
549 helper = &nv17_dac_helper_funcs; 548 helper = &nv17_dac_helper_funcs;
550 else 549 else
551 helper = &nv04_dac_helper_funcs; 550 helper = &nv04_dac_helper_funcs;
552 551
553 drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC); 552 drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC,
553 NULL);
554 drm_encoder_helper_add(encoder, helper); 554 drm_encoder_helper_add(encoder, helper);
555 555
556 encoder->possible_crtcs = entry->heads; 556 encoder->possible_crtcs = entry->heads;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 429ab5e3025a..05bfd151d1d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -652,8 +652,6 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
652 652
653static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { 653static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
654 .dpms = nv04_lvds_dpms, 654 .dpms = nv04_lvds_dpms,
655 .save = nv04_dfp_save,
656 .restore = nv04_dfp_restore,
657 .mode_fixup = nv04_dfp_mode_fixup, 655 .mode_fixup = nv04_dfp_mode_fixup,
658 .prepare = nv04_dfp_prepare, 656 .prepare = nv04_dfp_prepare,
659 .commit = nv04_dfp_commit, 657 .commit = nv04_dfp_commit,
@@ -663,8 +661,6 @@ static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
663 661
664static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = { 662static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
665 .dpms = nv04_tmds_dpms, 663 .dpms = nv04_tmds_dpms,
666 .save = nv04_dfp_save,
667 .restore = nv04_dfp_restore,
668 .mode_fixup = nv04_dfp_mode_fixup, 664 .mode_fixup = nv04_dfp_mode_fixup,
669 .prepare = nv04_dfp_prepare, 665 .prepare = nv04_dfp_prepare,
670 .commit = nv04_dfp_commit, 666 .commit = nv04_dfp_commit,
@@ -701,12 +697,15 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
701 if (!nv_encoder) 697 if (!nv_encoder)
702 return -ENOMEM; 698 return -ENOMEM;
703 699
700 nv_encoder->enc_save = nv04_dfp_save;
701 nv_encoder->enc_restore = nv04_dfp_restore;
702
704 encoder = to_drm_encoder(nv_encoder); 703 encoder = to_drm_encoder(nv_encoder);
705 704
706 nv_encoder->dcb = entry; 705 nv_encoder->dcb = entry;
707 nv_encoder->or = ffs(entry->or) - 1; 706 nv_encoder->or = ffs(entry->or) - 1;
708 707
709 drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type); 708 drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type, NULL);
710 drm_encoder_helper_add(encoder, helper); 709 drm_encoder_helper_add(encoder, helper);
711 710
712 encoder->possible_crtcs = entry->heads; 711 encoder->possible_crtcs = entry->heads;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 9e650081c357..b4a6bc433ef5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -39,7 +39,8 @@ nv04_display_create(struct drm_device *dev)
39 struct dcb_table *dcb = &drm->vbios.dcb; 39 struct dcb_table *dcb = &drm->vbios.dcb;
40 struct drm_connector *connector, *ct; 40 struct drm_connector *connector, *ct;
41 struct drm_encoder *encoder; 41 struct drm_encoder *encoder;
42 struct drm_crtc *crtc; 42 struct nouveau_encoder *nv_encoder;
43 struct nouveau_crtc *crtc;
43 struct nv04_display *disp; 44 struct nv04_display *disp;
44 int i, ret; 45 int i, ret;
45 46
@@ -107,14 +108,11 @@ nv04_display_create(struct drm_device *dev)
107 } 108 }
108 109
109 /* Save previous state */ 110 /* Save previous state */
110 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 111 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
111 crtc->funcs->save(crtc); 112 crtc->save(&crtc->base);
112
113 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
114 const struct drm_encoder_helper_funcs *func = encoder->helper_private;
115 113
116 func->save(encoder); 114 list_for_each_entry(nv_encoder, &dev->mode_config.encoder_list, base.base.head)
117 } 115 nv_encoder->enc_save(&nv_encoder->base.base);
118 116
119 nouveau_overlay_init(dev); 117 nouveau_overlay_init(dev);
120 118
@@ -126,8 +124,9 @@ nv04_display_destroy(struct drm_device *dev)
126{ 124{
127 struct nv04_display *disp = nv04_display(dev); 125 struct nv04_display *disp = nv04_display(dev);
128 struct nouveau_drm *drm = nouveau_drm(dev); 126 struct nouveau_drm *drm = nouveau_drm(dev);
129 struct drm_encoder *encoder; 127 struct nouveau_encoder *encoder;
130 struct drm_crtc *crtc; 128 struct drm_crtc *crtc;
129 struct nouveau_crtc *nv_crtc;
131 130
132 /* Turn every CRTC off. */ 131 /* Turn every CRTC off. */
133 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 132 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -139,14 +138,11 @@ nv04_display_destroy(struct drm_device *dev)
139 } 138 }
140 139
141 /* Restore state */ 140 /* Restore state */
142 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 141 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
143 const struct drm_encoder_helper_funcs *func = encoder->helper_private; 142 encoder->enc_restore(&encoder->base.base);
144 143
145 func->restore(encoder); 144 list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head)
146 } 145 nv_crtc->restore(&nv_crtc->base);
147
148 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
149 crtc->funcs->restore(crtc);
150 146
151 nouveau_hw_save_vga_fonts(dev, 0); 147 nouveau_hw_save_vga_fonts(dev, 0);
152 148
@@ -159,8 +155,8 @@ nv04_display_destroy(struct drm_device *dev)
159int 155int
160nv04_display_init(struct drm_device *dev) 156nv04_display_init(struct drm_device *dev)
161{ 157{
162 struct drm_encoder *encoder; 158 struct nouveau_encoder *encoder;
163 struct drm_crtc *crtc; 159 struct nouveau_crtc *crtc;
164 160
165 /* meh.. modeset apparently doesn't setup all the regs and depends 161 /* meh.. modeset apparently doesn't setup all the regs and depends
166 * on pre-existing state, for now load the state of the card *before* 162 * on pre-existing state, for now load the state of the card *before*
@@ -170,14 +166,11 @@ nv04_display_init(struct drm_device *dev)
170 * save/restore "pre-load" state, but more general so we can save 166 * save/restore "pre-load" state, but more general so we can save
171 * on suspend too. 167 * on suspend too.
172 */ 168 */
173 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 169 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
174 const struct drm_encoder_helper_funcs *func = encoder->helper_private; 170 crtc->save(&crtc->base);
175
176 func->restore(encoder);
177 }
178 171
179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 172 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
180 crtc->funcs->restore(crtc); 173 encoder->enc_save(&encoder->base.base);
181 174
182 return 0; 175 return 0;
183} 176}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 5345eb5378a8..54e9fb9eb5c0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -192,8 +192,6 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
192 192
193static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = { 193static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
194 .dpms = nv04_tv_dpms, 194 .dpms = nv04_tv_dpms,
195 .save = drm_i2c_encoder_save,
196 .restore = drm_i2c_encoder_restore,
197 .mode_fixup = drm_i2c_encoder_mode_fixup, 195 .mode_fixup = drm_i2c_encoder_mode_fixup,
198 .prepare = nv04_tv_prepare, 196 .prepare = nv04_tv_prepare,
199 .commit = nv04_tv_commit, 197 .commit = nv04_tv_commit,
@@ -225,9 +223,13 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
225 /* Initialize the common members */ 223 /* Initialize the common members */
226 encoder = to_drm_encoder(nv_encoder); 224 encoder = to_drm_encoder(nv_encoder);
227 225
228 drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC); 226 drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC,
227 NULL);
229 drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs); 228 drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
230 229
230 nv_encoder->enc_save = drm_i2c_encoder_save;
231 nv_encoder->enc_restore = drm_i2c_encoder_restore;
232
231 encoder->possible_crtcs = entry->heads; 233 encoder->possible_crtcs = entry->heads;
232 encoder->possible_clones = 0; 234 encoder->possible_clones = 0;
233 nv_encoder->dcb = entry; 235 nv_encoder->dcb = entry;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index b734195d80a0..163317d26de9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -769,10 +769,8 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
769 kfree(tv_enc); 769 kfree(tv_enc);
770} 770}
771 771
772static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { 772static const struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
773 .dpms = nv17_tv_dpms, 773 .dpms = nv17_tv_dpms,
774 .save = nv17_tv_save,
775 .restore = nv17_tv_restore,
776 .mode_fixup = nv17_tv_mode_fixup, 774 .mode_fixup = nv17_tv_mode_fixup,
777 .prepare = nv17_tv_prepare, 775 .prepare = nv17_tv_prepare,
778 .commit = nv17_tv_commit, 776 .commit = nv17_tv_commit,
@@ -780,14 +778,14 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
780 .detect = nv17_tv_detect, 778 .detect = nv17_tv_detect,
781}; 779};
782 780
783static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { 781static const struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
784 .get_modes = nv17_tv_get_modes, 782 .get_modes = nv17_tv_get_modes,
785 .mode_valid = nv17_tv_mode_valid, 783 .mode_valid = nv17_tv_mode_valid,
786 .create_resources = nv17_tv_create_resources, 784 .create_resources = nv17_tv_create_resources,
787 .set_property = nv17_tv_set_property, 785 .set_property = nv17_tv_set_property,
788}; 786};
789 787
790static struct drm_encoder_funcs nv17_tv_funcs = { 788static const struct drm_encoder_funcs nv17_tv_funcs = {
791 .destroy = nv17_tv_destroy, 789 .destroy = nv17_tv_destroy,
792}; 790};
793 791
@@ -816,10 +814,14 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
816 tv_enc->base.dcb = entry; 814 tv_enc->base.dcb = entry;
817 tv_enc->base.or = ffs(entry->or) - 1; 815 tv_enc->base.or = ffs(entry->or) - 1;
818 816
819 drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC); 817 drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC,
818 NULL);
820 drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs); 819 drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
821 to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs; 820 to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
822 821
822 tv_enc->base.enc_save = nv17_tv_save;
823 tv_enc->base.enc_restore = nv17_tv_restore;
824
823 encoder->possible_crtcs = entry->heads; 825 encoder->possible_crtcs = entry->heads;
824 encoder->possible_clones = 0; 826 encoder->possible_clones = 0;
825 827
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0002.h b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
new file mode 100644
index 000000000000..6d72ed38da32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
@@ -0,0 +1,66 @@
1#ifndef __NVIF_CL0002_H__
2#define __NVIF_CL0002_H__
3
4struct nv_dma_v0 {
5 __u8 version;
6#define NV_DMA_V0_TARGET_VM 0x00
7#define NV_DMA_V0_TARGET_VRAM 0x01
8#define NV_DMA_V0_TARGET_PCI 0x02
9#define NV_DMA_V0_TARGET_PCI_US 0x03
10#define NV_DMA_V0_TARGET_AGP 0x04
11 __u8 target;
12#define NV_DMA_V0_ACCESS_VM 0x00
13#define NV_DMA_V0_ACCESS_RD 0x01
14#define NV_DMA_V0_ACCESS_WR 0x02
15#define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR)
16 __u8 access;
17 __u8 pad03[5];
18 __u64 start;
19 __u64 limit;
20 /* ... chipset-specific class data */
21};
22
23struct nv50_dma_v0 {
24 __u8 version;
25#define NV50_DMA_V0_PRIV_VM 0x00
26#define NV50_DMA_V0_PRIV_US 0x01
27#define NV50_DMA_V0_PRIV__S 0x02
28 __u8 priv;
29#define NV50_DMA_V0_PART_VM 0x00
30#define NV50_DMA_V0_PART_256 0x01
31#define NV50_DMA_V0_PART_1KB 0x02
32 __u8 part;
33#define NV50_DMA_V0_COMP_NONE 0x00
34#define NV50_DMA_V0_COMP_1 0x01
35#define NV50_DMA_V0_COMP_2 0x02
36#define NV50_DMA_V0_COMP_VM 0x03
37 __u8 comp;
38#define NV50_DMA_V0_KIND_PITCH 0x00
39#define NV50_DMA_V0_KIND_VM 0x7f
40 __u8 kind;
41 __u8 pad05[3];
42};
43
44struct gf100_dma_v0 {
45 __u8 version;
46#define GF100_DMA_V0_PRIV_VM 0x00
47#define GF100_DMA_V0_PRIV_US 0x01
48#define GF100_DMA_V0_PRIV__S 0x02
49 __u8 priv;
50#define GF100_DMA_V0_KIND_PITCH 0x00
51#define GF100_DMA_V0_KIND_VM 0xff
52 __u8 kind;
53 __u8 pad03[5];
54};
55
56struct gf119_dma_v0 {
57 __u8 version;
58#define GF119_DMA_V0_PAGE_LP 0x00
59#define GF119_DMA_V0_PAGE_SP 0x01
60 __u8 page;
61#define GF119_DMA_V0_KIND_PITCH 0x00
62#define GF119_DMA_V0_KIND_VM 0xff
63 __u8 kind;
64 __u8 pad03[5];
65};
66#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0046.h b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
new file mode 100644
index 000000000000..a6a71f4ad91e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
@@ -0,0 +1,28 @@
1#ifndef __NVIF_CL0046_H__
2#define __NVIF_CL0046_H__
3
4#define NV04_DISP_NTFY_VBLANK 0x00
5#define NV04_DISP_NTFY_CONN 0x01
6
7struct nv04_disp_mthd_v0 {
8 __u8 version;
9#define NV04_DISP_SCANOUTPOS 0x00
10 __u8 method;
11 __u8 head;
12 __u8 pad03[5];
13};
14
15struct nv04_disp_scanoutpos_v0 {
16 __u8 version;
17 __u8 pad01[7];
18 __s64 time[2];
19 __u16 vblanks;
20 __u16 vblanke;
21 __u16 vtotal;
22 __u16 vline;
23 __u16 hblanks;
24 __u16 hblanke;
25 __u16 htotal;
26 __u16 hline;
27};
28#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl006b.h b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
new file mode 100644
index 000000000000..309ab8a3d9e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
@@ -0,0 +1,11 @@
1#ifndef __NVIF_CL006B_H__
2#define __NVIF_CL006B_H__
3
4struct nv03_channel_dma_v0 {
5 __u8 version;
6 __u8 chid;
7 __u8 pad02[2];
8 __u32 offset;
9 __u64 pushbuf;
10};
11#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
new file mode 100644
index 000000000000..331620a52afa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -0,0 +1,45 @@
1#ifndef __NVIF_CL0080_H__
2#define __NVIF_CL0080_H__
3
4struct nv_device_v0 {
5 __u8 version;
6 __u8 pad01[7];
7 __u64 device; /* device identifier, ~0 for client default */
8};
9
10#define NV_DEVICE_V0_INFO 0x00
11#define NV_DEVICE_V0_TIME 0x01
12
13struct nv_device_info_v0 {
14 __u8 version;
15#define NV_DEVICE_INFO_V0_IGP 0x00
16#define NV_DEVICE_INFO_V0_PCI 0x01
17#define NV_DEVICE_INFO_V0_AGP 0x02
18#define NV_DEVICE_INFO_V0_PCIE 0x03
19#define NV_DEVICE_INFO_V0_SOC 0x04
20 __u8 platform;
21 __u16 chipset; /* from NV_PMC_BOOT_0 */
22 __u8 revision; /* from NV_PMC_BOOT_0 */
23#define NV_DEVICE_INFO_V0_TNT 0x01
24#define NV_DEVICE_INFO_V0_CELSIUS 0x02
25#define NV_DEVICE_INFO_V0_KELVIN 0x03
26#define NV_DEVICE_INFO_V0_RANKINE 0x04
27#define NV_DEVICE_INFO_V0_CURIE 0x05
28#define NV_DEVICE_INFO_V0_TESLA 0x06
29#define NV_DEVICE_INFO_V0_FERMI 0x07
30#define NV_DEVICE_INFO_V0_KEPLER 0x08
31#define NV_DEVICE_INFO_V0_MAXWELL 0x09
32 __u8 family;
33 __u8 pad06[2];
34 __u64 ram_size;
35 __u64 ram_user;
36 char chip[16];
37 char name[64];
38};
39
40struct nv_device_time_v0 {
41 __u8 version;
42 __u8 pad01[7];
43 __u64 time;
44};
45#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
new file mode 100644
index 000000000000..aa94b8cf9679
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
@@ -0,0 +1,12 @@
1#ifndef __NVIF_CL506E_H__
2#define __NVIF_CL506E_H__
3
4struct nv50_channel_dma_v0 {
5 __u8 version;
6 __u8 chid;
7 __u8 pad02[6];
8 __u64 vm;
9 __u64 pushbuf;
10 __u64 offset;
11};
12#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
new file mode 100644
index 000000000000..3b7101966de4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
@@ -0,0 +1,13 @@
1#ifndef __NVIF_CL506F_H__
2#define __NVIF_CL506F_H__
3
4struct nv50_channel_gpfifo_v0 {
5 __u8 version;
6 __u8 chid;
7 __u8 pad02[2];
8 __u32 ilength;
9 __u64 ioffset;
10 __u64 pushbuf;
11 __u64 vm;
12};
13#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
new file mode 100644
index 000000000000..d15c296b5f33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -0,0 +1,99 @@
1#ifndef __NVIF_CL5070_H__
2#define __NVIF_CL5070_H__
3
4#define NV50_DISP_MTHD 0x00
5
6struct nv50_disp_mthd_v0 {
7 __u8 version;
8#define NV50_DISP_SCANOUTPOS 0x00
9 __u8 method;
10 __u8 head;
11 __u8 pad03[5];
12};
13
14struct nv50_disp_scanoutpos_v0 {
15 __u8 version;
16 __u8 pad01[7];
17 __s64 time[2];
18 __u16 vblanks;
19 __u16 vblanke;
20 __u16 vtotal;
21 __u16 vline;
22 __u16 hblanks;
23 __u16 hblanke;
24 __u16 htotal;
25 __u16 hline;
26};
27
28struct nv50_disp_mthd_v1 {
29 __u8 version;
30#define NV50_DISP_MTHD_V1_DAC_PWR 0x10
31#define NV50_DISP_MTHD_V1_DAC_LOAD 0x11
32#define NV50_DISP_MTHD_V1_SOR_PWR 0x20
33#define NV50_DISP_MTHD_V1_SOR_HDA_ELD 0x21
34#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
35#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
36#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
37#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
38 __u8 method;
39 __u16 hasht;
40 __u16 hashm;
41 __u8 pad06[2];
42};
43
44struct nv50_disp_dac_pwr_v0 {
45 __u8 version;
46 __u8 state;
47 __u8 data;
48 __u8 vsync;
49 __u8 hsync;
50 __u8 pad05[3];
51};
52
53struct nv50_disp_dac_load_v0 {
54 __u8 version;
55 __u8 load;
56 __u8 pad02[2];
57 __u32 data;
58};
59
60struct nv50_disp_sor_pwr_v0 {
61 __u8 version;
62 __u8 state;
63 __u8 pad02[6];
64};
65
66struct nv50_disp_sor_hda_eld_v0 {
67 __u8 version;
68 __u8 pad01[7];
69 __u8 data[];
70};
71
72struct nv50_disp_sor_hdmi_pwr_v0 {
73 __u8 version;
74 __u8 state;
75 __u8 max_ac_packet;
76 __u8 rekey;
77 __u8 pad04[4];
78};
79
80struct nv50_disp_sor_lvds_script_v0 {
81 __u8 version;
82 __u8 pad01[1];
83 __u16 script;
84 __u8 pad04[4];
85};
86
87struct nv50_disp_sor_dp_pwr_v0 {
88 __u8 version;
89 __u8 state;
90 __u8 pad02[6];
91};
92
93struct nv50_disp_pior_pwr_v0 {
94 __u8 version;
95 __u8 state;
96 __u8 type;
97 __u8 pad03[5];
98};
99#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507a.h b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
new file mode 100644
index 000000000000..12e0643b78bd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
@@ -0,0 +1,11 @@
1#ifndef __NVIF_CL507A_H__
2#define __NVIF_CL507A_H__
3
4struct nv50_disp_cursor_v0 {
5 __u8 version;
6 __u8 head;
7 __u8 pad02[6];
8};
9
10#define NV50_DISP_CURSOR_V0_NTFY_UEVENT 0x00
11#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507b.h b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
new file mode 100644
index 000000000000..99e9d8c47f60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
@@ -0,0 +1,11 @@
1#ifndef __NVIF_CL507B_H__
2#define __NVIF_CL507B_H__
3
4struct nv50_disp_overlay_v0 {
5 __u8 version;
6 __u8 head;
7 __u8 pad02[6];
8};
9
10#define NV50_DISP_OVERLAY_V0_NTFY_UEVENT 0x00
11#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507c.h b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
new file mode 100644
index 000000000000..6af70dbdfd9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
@@ -0,0 +1,12 @@
1#ifndef __NVIF_CL507C_H__
2#define __NVIF_CL507C_H__
3
4struct nv50_disp_base_channel_dma_v0 {
5 __u8 version;
6 __u8 head;
7 __u8 pad02[6];
8 __u64 pushbuf;
9};
10
11#define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
12#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507d.h b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
new file mode 100644
index 000000000000..5ab0c9e4c6a3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
@@ -0,0 +1,11 @@
1#ifndef __NVIF_CL507D_H__
2#define __NVIF_CL507D_H__
3
4struct nv50_disp_core_channel_dma_v0 {
5 __u8 version;
6 __u8 pad01[7];
7 __u64 pushbuf;
8};
9
10#define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
11#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507e.h b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
new file mode 100644
index 000000000000..c06209f3cac4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
@@ -0,0 +1,12 @@
1#ifndef __NVIF_CL507E_H__
2#define __NVIF_CL507E_H__
3
4struct nv50_disp_overlay_channel_dma_v0 {
5 __u8 version;
6 __u8 head;
7 __u8 pad02[6];
8 __u64 pushbuf;
9};
10
11#define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
12#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
new file mode 100644
index 000000000000..05e6ef7cd190
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -0,0 +1,14 @@
1#ifndef __NVIF_CL826E_H__
2#define __NVIF_CL826E_H__
3
4struct g82_channel_dma_v0 {
5 __u8 version;
6 __u8 chid;
7 __u8 pad02[6];
8 __u64 vm;
9 __u64 pushbuf;
10 __u64 offset;
11};
12
13#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
14#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
new file mode 100644
index 000000000000..cecafcb1e954
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -0,0 +1,15 @@
1#ifndef __NVIF_CL826F_H__
2#define __NVIF_CL826F_H__
3
4struct g82_channel_gpfifo_v0 {
5 __u8 version;
6 __u8 chid;
7 __u8 pad02[2];
8 __u32 ilength;
9 __u64 ioffset;
10 __u64 pushbuf;
11 __u64 vm;
12};
13
14#define G82_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
15#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
new file mode 100644
index 000000000000..2caf0838fcfd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -0,0 +1,14 @@
1#ifndef __NVIF_CL906F_H__
2#define __NVIF_CL906F_H__
3
4struct fermi_channel_gpfifo_v0 {
5 __u8 version;
6 __u8 chid;
7 __u8 pad02[2];
8 __u32 ilength;
9 __u64 ioffset;
10 __u64 vm;
11};
12
13#define FERMI_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
14#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl9097.h b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
new file mode 100644
index 000000000000..4057676d2981
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
@@ -0,0 +1,44 @@
1#ifndef __NVIF_CL9097_H__
2#define __NVIF_CL9097_H__
3
4#define FERMI_A_ZBC_COLOR 0x00
5#define FERMI_A_ZBC_DEPTH 0x01
6
7struct fermi_a_zbc_color_v0 {
8 __u8 version;
9#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01
10#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02
11#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04
12#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08
13#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c
14#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10
15#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14
16#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16
17#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18
18#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c
19#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20
20#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24
21#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28
22#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c
23#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30
24#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34
25#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38
26#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c
27#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40
28 __u8 format;
29 __u8 index;
30 __u8 pad03[5];
31 __u32 ds[4];
32 __u32 l2[4];
33};
34
35struct fermi_a_zbc_depth_v0 {
36 __u8 version;
37#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01
38 __u8 format;
39 __u8 index;
40 __u8 pad03[5];
41 __u32 ds;
42 __u32 l2;
43};
44#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
new file mode 100644
index 000000000000..85b7827eb782
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -0,0 +1,21 @@
1#ifndef __NVIF_CLA06F_H__
2#define __NVIF_CLA06F_H__
3
4struct kepler_channel_gpfifo_a_v0 {
5 __u8 version;
6#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01
7#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPDEC 0x02
8#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPPP 0x04
9#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSVLD 0x08
10#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10
11#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20
12#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
13 __u8 engine;
14 __u16 chid;
15 __u32 ilength;
16 __u64 ioffset;
17 __u64 vm;
18};
19
20#define KEPLER_CHANNEL_GPFIFO_A_V0_NTFY_UEVENT 0x00
21#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 95a64d89547c..4179cd65ac0a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -1,16 +1,21 @@
1#ifndef __NVIF_CLASS_H__ 1#ifndef __NVIF_CLASS_H__
2#define __NVIF_CLASS_H__ 2#define __NVIF_CLASS_H__
3 3
4/******************************************************************************* 4/* these class numbers are made up by us, and not nvidia-assigned */
5 * class identifiers 5#define NVIF_CLASS_CONTROL /* if0001.h */ -1
6 ******************************************************************************/ 6#define NVIF_CLASS_PERFMON /* if0002.h */ -2
7#define NVIF_CLASS_PERFDOM /* if0003.h */ -3
8#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4
9#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5
10#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6
11#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7
7 12
8/* the below match nvidia-assigned (either in hw, or sw) class numbers */ 13/* the below match nvidia-assigned (either in hw, or sw) class numbers */
9#define NV_DEVICE 0x00000080 14#define NV_DEVICE /* cl0080.h */ 0x00000080
10 15
11#define NV_DMA_FROM_MEMORY 0x00000002 16#define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002
12#define NV_DMA_TO_MEMORY 0x00000003 17#define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003
13#define NV_DMA_IN_MEMORY 0x0000003d 18#define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d
14 19
15#define FERMI_TWOD_A 0x0000902d 20#define FERMI_TWOD_A 0x0000902d
16 21
@@ -19,85 +24,85 @@
19#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 24#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
20#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140 25#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
21 26
22#define NV04_DISP 0x00000046 27#define NV04_DISP /* cl0046.h */ 0x00000046
23 28
24#define NV03_CHANNEL_DMA 0x0000006b 29#define NV03_CHANNEL_DMA /* cl506b.h */ 0x0000006b
25#define NV10_CHANNEL_DMA 0x0000006e 30#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
26#define NV17_CHANNEL_DMA 0x0000176e 31#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
27#define NV40_CHANNEL_DMA 0x0000406e 32#define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e
28#define NV50_CHANNEL_DMA 0x0000506e 33#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e
29#define G82_CHANNEL_DMA 0x0000826e 34#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e
30 35
31#define NV50_CHANNEL_GPFIFO 0x0000506f 36#define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f
32#define G82_CHANNEL_GPFIFO 0x0000826f 37#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
33#define FERMI_CHANNEL_GPFIFO 0x0000906f 38#define FERMI_CHANNEL_GPFIFO /* cl906f.h */ 0x0000906f
34#define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f 39#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
35#define MAXWELL_CHANNEL_GPFIFO_A 0x0000b06f 40#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
36 41
37#define NV50_DISP 0x00005070 42#define NV50_DISP /* cl5070.h */ 0x00005070
38#define G82_DISP 0x00008270 43#define G82_DISP /* cl5070.h */ 0x00008270
39#define GT200_DISP 0x00008370 44#define GT200_DISP /* cl5070.h */ 0x00008370
40#define GT214_DISP 0x00008570 45#define GT214_DISP /* cl5070.h */ 0x00008570
41#define GT206_DISP 0x00008870 46#define GT206_DISP /* cl5070.h */ 0x00008870
42#define GF110_DISP 0x00009070 47#define GF110_DISP /* cl5070.h */ 0x00009070
43#define GK104_DISP 0x00009170 48#define GK104_DISP /* cl5070.h */ 0x00009170
44#define GK110_DISP 0x00009270 49#define GK110_DISP /* cl5070.h */ 0x00009270
45#define GM107_DISP 0x00009470 50#define GM107_DISP /* cl5070.h */ 0x00009470
46#define GM204_DISP 0x00009570 51#define GM204_DISP /* cl5070.h */ 0x00009570
47 52
48#define NV31_MPEG 0x00003174 53#define NV31_MPEG 0x00003174
49#define G82_MPEG 0x00008274 54#define G82_MPEG 0x00008274
50 55
51#define NV74_VP2 0x00007476 56#define NV74_VP2 0x00007476
52 57
53#define NV50_DISP_CURSOR 0x0000507a 58#define NV50_DISP_CURSOR /* cl507a.h */ 0x0000507a
54#define G82_DISP_CURSOR 0x0000827a 59#define G82_DISP_CURSOR /* cl507a.h */ 0x0000827a
55#define GT214_DISP_CURSOR 0x0000857a 60#define GT214_DISP_CURSOR /* cl507a.h */ 0x0000857a
56#define GF110_DISP_CURSOR 0x0000907a 61#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
57#define GK104_DISP_CURSOR 0x0000917a 62#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
58 63
59#define NV50_DISP_OVERLAY 0x0000507b 64#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
60#define G82_DISP_OVERLAY 0x0000827b 65#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
61#define GT214_DISP_OVERLAY 0x0000857b 66#define GT214_DISP_OVERLAY /* cl507b.h */ 0x0000857b
62#define GF110_DISP_OVERLAY 0x0000907b 67#define GF110_DISP_OVERLAY /* cl507b.h */ 0x0000907b
63#define GK104_DISP_OVERLAY 0x0000917b 68#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
64 69
65#define NV50_DISP_BASE_CHANNEL_DMA 0x0000507c 70#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
66#define G82_DISP_BASE_CHANNEL_DMA 0x0000827c 71#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
67#define GT200_DISP_BASE_CHANNEL_DMA 0x0000837c 72#define GT200_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000837c
68#define GT214_DISP_BASE_CHANNEL_DMA 0x0000857c 73#define GT214_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000857c
69#define GF110_DISP_BASE_CHANNEL_DMA 0x0000907c 74#define GF110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000907c
70#define GK104_DISP_BASE_CHANNEL_DMA 0x0000917c 75#define GK104_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000917c
71#define GK110_DISP_BASE_CHANNEL_DMA 0x0000927c 76#define GK110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000927c
72 77
73#define NV50_DISP_CORE_CHANNEL_DMA 0x0000507d 78#define NV50_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000507d
74#define G82_DISP_CORE_CHANNEL_DMA 0x0000827d 79#define G82_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000827d
75#define GT200_DISP_CORE_CHANNEL_DMA 0x0000837d 80#define GT200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000837d
76#define GT214_DISP_CORE_CHANNEL_DMA 0x0000857d 81#define GT214_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000857d
77#define GT206_DISP_CORE_CHANNEL_DMA 0x0000887d 82#define GT206_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000887d
78#define GF110_DISP_CORE_CHANNEL_DMA 0x0000907d 83#define GF110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000907d
79#define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d 84#define GK104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000917d
80#define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d 85#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
81#define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d 86#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
82#define GM204_DISP_CORE_CHANNEL_DMA 0x0000957d 87#define GM204_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
83 88
84#define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e 89#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
85#define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e 90#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
86#define GT200_DISP_OVERLAY_CHANNEL_DMA 0x0000837e 91#define GT200_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000837e
87#define GT214_DISP_OVERLAY_CHANNEL_DMA 0x0000857e 92#define GT214_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000857e
88#define GF110_DISP_OVERLAY_CONTROL_DMA 0x0000907e 93#define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
89#define GK104_DISP_OVERLAY_CONTROL_DMA 0x0000917e 94#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
90 95
91#define FERMI_A 0x00009097 96#define FERMI_A /* cl9097.h */ 0x00009097
92#define FERMI_B 0x00009197 97#define FERMI_B /* cl9097.h */ 0x00009197
93#define FERMI_C 0x00009297 98#define FERMI_C /* cl9097.h */ 0x00009297
94 99
95#define KEPLER_A 0x0000a097 100#define KEPLER_A /* cl9097.h */ 0x0000a097
96#define KEPLER_B 0x0000a197 101#define KEPLER_B /* cl9097.h */ 0x0000a197
97#define KEPLER_C 0x0000a297 102#define KEPLER_C /* cl9097.h */ 0x0000a297
98 103
99#define MAXWELL_A 0x0000b097 104#define MAXWELL_A /* cl9097.h */ 0x0000b097
100#define MAXWELL_B 0x0000b197 105#define MAXWELL_B /* cl9097.h */ 0x0000b197
101 106
102#define NV74_BSP 0x000074b0 107#define NV74_BSP 0x000074b0
103 108
@@ -133,540 +138,4 @@
133#define MAXWELL_COMPUTE_B 0x0000b1c0 138#define MAXWELL_COMPUTE_B 0x0000b1c0
134 139
135#define NV74_CIPHER 0x000074c1 140#define NV74_CIPHER 0x000074c1
136
137/*******************************************************************************
138 * client
139 ******************************************************************************/
140
141#define NV_CLIENT_DEVLIST 0x00
142
143struct nv_client_devlist_v0 {
144 __u8 version;
145 __u8 count;
146 __u8 pad02[6];
147 __u64 device[];
148};
149
150
151/*******************************************************************************
152 * device
153 ******************************************************************************/
154
155struct nv_device_v0 {
156 __u8 version;
157 __u8 pad01[7];
158 __u64 device; /* device identifier, ~0 for client default */
159};
160
161#define NV_DEVICE_V0_INFO 0x00
162#define NV_DEVICE_V0_TIME 0x01
163
164struct nv_device_info_v0 {
165 __u8 version;
166#define NV_DEVICE_INFO_V0_IGP 0x00
167#define NV_DEVICE_INFO_V0_PCI 0x01
168#define NV_DEVICE_INFO_V0_AGP 0x02
169#define NV_DEVICE_INFO_V0_PCIE 0x03
170#define NV_DEVICE_INFO_V0_SOC 0x04
171 __u8 platform;
172 __u16 chipset; /* from NV_PMC_BOOT_0 */
173 __u8 revision; /* from NV_PMC_BOOT_0 */
174#define NV_DEVICE_INFO_V0_TNT 0x01
175#define NV_DEVICE_INFO_V0_CELSIUS 0x02
176#define NV_DEVICE_INFO_V0_KELVIN 0x03
177#define NV_DEVICE_INFO_V0_RANKINE 0x04
178#define NV_DEVICE_INFO_V0_CURIE 0x05
179#define NV_DEVICE_INFO_V0_TESLA 0x06
180#define NV_DEVICE_INFO_V0_FERMI 0x07
181#define NV_DEVICE_INFO_V0_KEPLER 0x08
182#define NV_DEVICE_INFO_V0_MAXWELL 0x09
183 __u8 family;
184 __u8 pad06[2];
185 __u64 ram_size;
186 __u64 ram_user;
187 char chip[16];
188 char name[64];
189};
190
191struct nv_device_time_v0 {
192 __u8 version;
193 __u8 pad01[7];
194 __u64 time;
195};
196
197
198/*******************************************************************************
199 * context dma
200 ******************************************************************************/
201
202struct nv_dma_v0 {
203 __u8 version;
204#define NV_DMA_V0_TARGET_VM 0x00
205#define NV_DMA_V0_TARGET_VRAM 0x01
206#define NV_DMA_V0_TARGET_PCI 0x02
207#define NV_DMA_V0_TARGET_PCI_US 0x03
208#define NV_DMA_V0_TARGET_AGP 0x04
209 __u8 target;
210#define NV_DMA_V0_ACCESS_VM 0x00
211#define NV_DMA_V0_ACCESS_RD 0x01
212#define NV_DMA_V0_ACCESS_WR 0x02
213#define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR)
214 __u8 access;
215 __u8 pad03[5];
216 __u64 start;
217 __u64 limit;
218 /* ... chipset-specific class data */
219};
220
221struct nv50_dma_v0 {
222 __u8 version;
223#define NV50_DMA_V0_PRIV_VM 0x00
224#define NV50_DMA_V0_PRIV_US 0x01
225#define NV50_DMA_V0_PRIV__S 0x02
226 __u8 priv;
227#define NV50_DMA_V0_PART_VM 0x00
228#define NV50_DMA_V0_PART_256 0x01
229#define NV50_DMA_V0_PART_1KB 0x02
230 __u8 part;
231#define NV50_DMA_V0_COMP_NONE 0x00
232#define NV50_DMA_V0_COMP_1 0x01
233#define NV50_DMA_V0_COMP_2 0x02
234#define NV50_DMA_V0_COMP_VM 0x03
235 __u8 comp;
236#define NV50_DMA_V0_KIND_PITCH 0x00
237#define NV50_DMA_V0_KIND_VM 0x7f
238 __u8 kind;
239 __u8 pad05[3];
240};
241
242struct gf100_dma_v0 {
243 __u8 version;
244#define GF100_DMA_V0_PRIV_VM 0x00
245#define GF100_DMA_V0_PRIV_US 0x01
246#define GF100_DMA_V0_PRIV__S 0x02
247 __u8 priv;
248#define GF100_DMA_V0_KIND_PITCH 0x00
249#define GF100_DMA_V0_KIND_VM 0xff
250 __u8 kind;
251 __u8 pad03[5];
252};
253
254struct gf119_dma_v0 {
255 __u8 version;
256#define GF119_DMA_V0_PAGE_LP 0x00
257#define GF119_DMA_V0_PAGE_SP 0x01
258 __u8 page;
259#define GF119_DMA_V0_KIND_PITCH 0x00
260#define GF119_DMA_V0_KIND_VM 0xff
261 __u8 kind;
262 __u8 pad03[5];
263};
264
265
266/*******************************************************************************
267 * perfmon
268 ******************************************************************************/
269
270#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
271#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
272#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
273
274struct nvif_perfmon_query_domain_v0 {
275 __u8 version;
276 __u8 id;
277 __u8 counter_nr;
278 __u8 iter;
279 __u16 signal_nr;
280 __u8 pad05[2];
281 char name[64];
282};
283
284struct nvif_perfmon_query_signal_v0 {
285 __u8 version;
286 __u8 domain;
287 __u16 iter;
288 __u8 signal;
289 __u8 source_nr;
290 __u8 pad05[2];
291 char name[64];
292};
293
294struct nvif_perfmon_query_source_v0 {
295 __u8 version;
296 __u8 domain;
297 __u8 signal;
298 __u8 iter;
299 __u8 pad04[4];
300 __u32 source;
301 __u32 mask;
302 char name[64];
303};
304
305
306/*******************************************************************************
307 * perfdom
308 ******************************************************************************/
309
310struct nvif_perfdom_v0 {
311 __u8 version;
312 __u8 domain;
313 __u8 mode;
314 __u8 pad03[1];
315 struct {
316 __u8 signal[4];
317 __u64 source[4][8];
318 __u16 logic_op;
319 } ctr[4];
320};
321
322#define NVIF_PERFDOM_V0_INIT 0x00
323#define NVIF_PERFDOM_V0_SAMPLE 0x01
324#define NVIF_PERFDOM_V0_READ 0x02
325
326struct nvif_perfdom_init {
327};
328
329struct nvif_perfdom_sample {
330};
331
332struct nvif_perfdom_read_v0 {
333 __u8 version;
334 __u8 pad01[7];
335 __u32 ctr[4];
336 __u32 clk;
337 __u8 pad04[4];
338};
339
340
341/*******************************************************************************
342 * device control
343 ******************************************************************************/
344
345#define NVIF_CONTROL_PSTATE_INFO 0x00
346#define NVIF_CONTROL_PSTATE_ATTR 0x01
347#define NVIF_CONTROL_PSTATE_USER 0x02
348
349struct nvif_control_pstate_info_v0 {
350 __u8 version;
351 __u8 count; /* out: number of power states */
352#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE (-1)
353#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_PERFMON (-2)
354 __s8 ustate_ac; /* out: target pstate index */
355 __s8 ustate_dc; /* out: target pstate index */
356 __s8 pwrsrc; /* out: current power source */
357#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN (-1)
358#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_PERFMON (-2)
359 __s8 pstate; /* out: current pstate index */
360 __u8 pad06[2];
361};
362
363struct nvif_control_pstate_attr_v0 {
364 __u8 version;
365#define NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT (-1)
366 __s8 state; /* in: index of pstate to query
367 * out: pstate identifier
368 */
369 __u8 index; /* in: index of attribute to query
370 * out: index of next attribute, or 0 if no more
371 */
372 __u8 pad03[5];
373 __u32 min;
374 __u32 max;
375 char name[32];
376 char unit[16];
377};
378
379struct nvif_control_pstate_user_v0 {
380 __u8 version;
381#define NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN (-1)
382#define NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON (-2)
383 __s8 ustate; /* in: pstate identifier */
384 __s8 pwrsrc; /* in: target power source */
385 __u8 pad03[5];
386};
387
388
389/*******************************************************************************
390 * DMA FIFO channels
391 ******************************************************************************/
392
393struct nv03_channel_dma_v0 {
394 __u8 version;
395 __u8 chid;
396 __u8 pad02[2];
397 __u32 offset;
398 __u64 pushbuf;
399};
400
401struct nv50_channel_dma_v0 {
402 __u8 version;
403 __u8 chid;
404 __u8 pad02[6];
405 __u64 vm;
406 __u64 pushbuf;
407 __u64 offset;
408};
409
410#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
411
412/*******************************************************************************
413 * GPFIFO channels
414 ******************************************************************************/
415
416struct nv50_channel_gpfifo_v0 {
417 __u8 version;
418 __u8 chid;
419 __u8 pad02[2];
420 __u32 ilength;
421 __u64 ioffset;
422 __u64 pushbuf;
423 __u64 vm;
424};
425
426struct fermi_channel_gpfifo_v0 {
427 __u8 version;
428 __u8 chid;
429 __u8 pad02[2];
430 __u32 ilength;
431 __u64 ioffset;
432 __u64 vm;
433};
434
435struct kepler_channel_gpfifo_a_v0 {
436 __u8 version;
437#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01
438#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPDEC 0x02
439#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPPP 0x04
440#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSVLD 0x08
441#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10
442#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20
443#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
444 __u8 engine;
445 __u16 chid;
446 __u32 ilength;
447 __u64 ioffset;
448 __u64 vm;
449};
450
451/*******************************************************************************
452 * legacy display
453 ******************************************************************************/
454
455#define NV04_DISP_NTFY_VBLANK 0x00
456#define NV04_DISP_NTFY_CONN 0x01
457
458struct nv04_disp_mthd_v0 {
459 __u8 version;
460#define NV04_DISP_SCANOUTPOS 0x00
461 __u8 method;
462 __u8 head;
463 __u8 pad03[5];
464};
465
466struct nv04_disp_scanoutpos_v0 {
467 __u8 version;
468 __u8 pad01[7];
469 __s64 time[2];
470 __u16 vblanks;
471 __u16 vblanke;
472 __u16 vtotal;
473 __u16 vline;
474 __u16 hblanks;
475 __u16 hblanke;
476 __u16 htotal;
477 __u16 hline;
478};
479
480/*******************************************************************************
481 * display
482 ******************************************************************************/
483
484#define NV50_DISP_MTHD 0x00
485
486struct nv50_disp_mthd_v0 {
487 __u8 version;
488#define NV50_DISP_SCANOUTPOS 0x00
489 __u8 method;
490 __u8 head;
491 __u8 pad03[5];
492};
493
494struct nv50_disp_mthd_v1 {
495 __u8 version;
496#define NV50_DISP_MTHD_V1_DAC_PWR 0x10
497#define NV50_DISP_MTHD_V1_DAC_LOAD 0x11
498#define NV50_DISP_MTHD_V1_SOR_PWR 0x20
499#define NV50_DISP_MTHD_V1_SOR_HDA_ELD 0x21
500#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
501#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
502#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
503#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
504 __u8 method;
505 __u16 hasht;
506 __u16 hashm;
507 __u8 pad06[2];
508};
509
510struct nv50_disp_dac_pwr_v0 {
511 __u8 version;
512 __u8 state;
513 __u8 data;
514 __u8 vsync;
515 __u8 hsync;
516 __u8 pad05[3];
517};
518
519struct nv50_disp_dac_load_v0 {
520 __u8 version;
521 __u8 load;
522 __u8 pad02[2];
523 __u32 data;
524};
525
526struct nv50_disp_sor_pwr_v0 {
527 __u8 version;
528 __u8 state;
529 __u8 pad02[6];
530};
531
532struct nv50_disp_sor_hda_eld_v0 {
533 __u8 version;
534 __u8 pad01[7];
535 __u8 data[];
536};
537
538struct nv50_disp_sor_hdmi_pwr_v0 {
539 __u8 version;
540 __u8 state;
541 __u8 max_ac_packet;
542 __u8 rekey;
543 __u8 pad04[4];
544};
545
546struct nv50_disp_sor_lvds_script_v0 {
547 __u8 version;
548 __u8 pad01[1];
549 __u16 script;
550 __u8 pad04[4];
551};
552
553struct nv50_disp_sor_dp_pwr_v0 {
554 __u8 version;
555 __u8 state;
556 __u8 pad02[6];
557};
558
559struct nv50_disp_pior_pwr_v0 {
560 __u8 version;
561 __u8 state;
562 __u8 type;
563 __u8 pad03[5];
564};
565
566/* core */
567struct nv50_disp_core_channel_dma_v0 {
568 __u8 version;
569 __u8 pad01[7];
570 __u64 pushbuf;
571};
572
573#define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
574
575/* cursor immediate */
576struct nv50_disp_cursor_v0 {
577 __u8 version;
578 __u8 head;
579 __u8 pad02[6];
580};
581
582#define NV50_DISP_CURSOR_V0_NTFY_UEVENT 0x00
583
584/* base */
585struct nv50_disp_base_channel_dma_v0 {
586 __u8 version;
587 __u8 head;
588 __u8 pad02[6];
589 __u64 pushbuf;
590};
591
592#define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
593
594/* overlay */
595struct nv50_disp_overlay_channel_dma_v0 {
596 __u8 version;
597 __u8 head;
598 __u8 pad02[6];
599 __u64 pushbuf;
600};
601
602#define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
603
604/* overlay immediate */
605struct nv50_disp_overlay_v0 {
606 __u8 version;
607 __u8 head;
608 __u8 pad02[6];
609};
610
611#define NV50_DISP_OVERLAY_V0_NTFY_UEVENT 0x00
612
613/*******************************************************************************
614 * software
615 ******************************************************************************/
616
617#define NVSW_NTFY_UEVENT 0x00
618
619#define NV04_NVSW_GET_REF 0x00
620
621struct nv04_nvsw_get_ref_v0 {
622 __u8 version;
623 __u8 pad01[3];
624 __u32 ref;
625};
626
627/*******************************************************************************
628 * fermi
629 ******************************************************************************/
630
631#define FERMI_A_ZBC_COLOR 0x00
632#define FERMI_A_ZBC_DEPTH 0x01
633
634struct fermi_a_zbc_color_v0 {
635 __u8 version;
636#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01
637#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02
638#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04
639#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08
640#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c
641#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10
642#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14
643#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16
644#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18
645#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c
646#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20
647#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24
648#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28
649#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c
650#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30
651#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34
652#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38
653#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c
654#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40
655 __u8 format;
656 __u8 index;
657 __u8 pad03[5];
658 __u32 ds[4];
659 __u32 l2[4];
660};
661
662struct fermi_a_zbc_depth_v0 {
663 __u8 version;
664#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01
665 __u8 format;
666 __u8 index;
667 __u8 pad03[5];
668 __u32 ds;
669 __u32 l2;
670};
671
672#endif 141#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 700a9b206726..e0ed2f4b2f43 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -2,7 +2,7 @@
2#define __NVIF_DEVICE_H__ 2#define __NVIF_DEVICE_H__
3 3
4#include <nvif/object.h> 4#include <nvif/object.h>
5#include <nvif/class.h> 5#include <nvif/cl0080.h>
6 6
7struct nvif_device { 7struct nvif_device {
8 struct nvif_object object; 8 struct nvif_object object;
@@ -63,6 +63,7 @@ u64 nvif_device_time(struct nvif_device *);
63#define nvxx_clk(a) nvxx_device(a)->clk 63#define nvxx_clk(a) nvxx_device(a)->clk
64#define nvxx_i2c(a) nvxx_device(a)->i2c 64#define nvxx_i2c(a) nvxx_device(a)->i2c
65#define nvxx_therm(a) nvxx_device(a)->therm 65#define nvxx_therm(a) nvxx_device(a)->therm
66#define nvxx_volt(a) nvxx_device(a)->volt
66 67
67#include <core/device.h> 68#include <core/device.h>
68#include <engine/fifo.h> 69#include <engine/fifo.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
new file mode 100644
index 000000000000..85c44e8a1201
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -0,0 +1,12 @@
1#ifndef __NVIF_IF0000_H__
2#define __NVIF_IF0000_H__
3
4#define NV_CLIENT_DEVLIST 0x00
5
6struct nv_client_devlist_v0 {
7 __u8 version;
8 __u8 count;
9 __u8 pad02[6];
10 __u64 device[];
11};
12#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0001.h b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
new file mode 100644
index 000000000000..bd5b64125eed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
@@ -0,0 +1,46 @@
1#ifndef __NVIF_IF0001_H__
2#define __NVIF_IF0001_H__
3
4#define NVIF_CONTROL_PSTATE_INFO 0x00
5#define NVIF_CONTROL_PSTATE_ATTR 0x01
6#define NVIF_CONTROL_PSTATE_USER 0x02
7
8struct nvif_control_pstate_info_v0 {
9 __u8 version;
10 __u8 count; /* out: number of power states */
11#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE (-1)
12#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_PERFMON (-2)
13 __s8 ustate_ac; /* out: target pstate index */
14 __s8 ustate_dc; /* out: target pstate index */
15 __s8 pwrsrc; /* out: current power source */
16#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN (-1)
17#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_PERFMON (-2)
18 __s8 pstate; /* out: current pstate index */
19 __u8 pad06[2];
20};
21
22struct nvif_control_pstate_attr_v0 {
23 __u8 version;
24#define NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT (-1)
25 __s8 state; /* in: index of pstate to query
26 * out: pstate identifier
27 */
28 __u8 index; /* in: index of attribute to query
29 * out: index of next attribute, or 0 if no more
30 */
31 __u8 pad03[5];
32 __u32 min;
33 __u32 max;
34 char name[32];
35 char unit[16];
36};
37
38struct nvif_control_pstate_user_v0 {
39 __u8 version;
40#define NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN (-1)
41#define NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON (-2)
42 __s8 ustate; /* in: pstate identifier */
43 __s8 pwrsrc; /* in: target power source */
44 __u8 pad03[5];
45};
46#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0002.h b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
new file mode 100644
index 000000000000..c04c91d0b818
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
@@ -0,0 +1,38 @@
1#ifndef __NVIF_IF0002_H__
2#define __NVIF_IF0002_H__
3
4#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
5#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
6#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
7
8struct nvif_perfmon_query_domain_v0 {
9 __u8 version;
10 __u8 id;
11 __u8 counter_nr;
12 __u8 iter;
13 __u16 signal_nr;
14 __u8 pad05[2];
15 char name[64];
16};
17
18struct nvif_perfmon_query_signal_v0 {
19 __u8 version;
20 __u8 domain;
21 __u16 iter;
22 __u8 signal;
23 __u8 source_nr;
24 __u8 pad05[2];
25 char name[64];
26};
27
28struct nvif_perfmon_query_source_v0 {
29 __u8 version;
30 __u8 domain;
31 __u8 signal;
32 __u8 iter;
33 __u8 pad04[4];
34 __u32 source;
35 __u32 mask;
36 char name[64];
37};
38#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0003.h b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
new file mode 100644
index 000000000000..0cd03efb80a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
@@ -0,0 +1,33 @@
1#ifndef __NVIF_IF0003_H__
2#define __NVIF_IF0003_H__
3
4struct nvif_perfdom_v0 {
5 __u8 version;
6 __u8 domain;
7 __u8 mode;
8 __u8 pad03[1];
9 struct {
10 __u8 signal[4];
11 __u64 source[4][8];
12 __u16 logic_op;
13 } ctr[4];
14};
15
16#define NVIF_PERFDOM_V0_INIT 0x00
17#define NVIF_PERFDOM_V0_SAMPLE 0x01
18#define NVIF_PERFDOM_V0_READ 0x02
19
20struct nvif_perfdom_init {
21};
22
23struct nvif_perfdom_sample {
24};
25
26struct nvif_perfdom_read_v0 {
27 __u8 version;
28 __u8 pad01[7];
29 __u32 ctr[4];
30 __u32 clk;
31 __u8 pad04[4];
32};
33#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0004.h b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
new file mode 100644
index 000000000000..bd5cd428cfd7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
@@ -0,0 +1,13 @@
1#ifndef __NVIF_IF0004_H__
2#define __NVIF_IF0004_H__
3
4#define NV04_NVSW_NTFY_UEVENT 0x00
5
6#define NV04_NVSW_GET_REF 0x00
7
8struct nv04_nvsw_get_ref_v0 {
9 __u8 version;
10 __u8 pad01[3];
11 __u32 ref;
12};
13#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0005.h b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
new file mode 100644
index 000000000000..abfd373bb68b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
@@ -0,0 +1,4 @@
1#ifndef __NVIF_IF0005_H__
2#define __NVIF_IF0005_H__
3#define NV10_NVSW_NTFY_UEVENT 0x00
4#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index b0ac0215ebf9..c5f5eb83a594 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -55,14 +55,6 @@ struct nvif_ioctl_new_v0 {
55 __u64 token; 55 __u64 token;
56 __u64 object; 56 __u64 object;
57 __u32 handle; 57 __u32 handle;
58/* these class numbers are made up by us, and not nvidia-assigned */
59#define NVIF_IOCTL_NEW_V0_CONTROL -1
60#define NVIF_IOCTL_NEW_V0_PERFMON -2
61#define NVIF_IOCTL_NEW_V0_PERFDOM -3
62#define NVIF_IOCTL_NEW_V0_SW_NV04 -4
63#define NVIF_IOCTL_NEW_V0_SW_NV10 -5
64#define NVIF_IOCTL_NEW_V0_SW_NV50 -6
65#define NVIF_IOCTL_NEW_V0_SW_GF100 -7
66 __s32 oclass; 58 __s32 oclass;
67 __u8 data[]; /* class data (class.h) */ 59 __u8 data[]; /* class data (class.h) */
68}; 60};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
index 5933188b4a77..751bcf4930a7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/unpack.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
@@ -1,24 +1,28 @@
1#ifndef __NVIF_UNPACK_H__ 1#ifndef __NVIF_UNPACK_H__
2#define __NVIF_UNPACK_H__ 2#define __NVIF_UNPACK_H__
3 3
4#define nvif_unvers(d) ({ \ 4#define nvif_unvers(r,d,s,m) ({ \
5 ret = (size == sizeof(d)) ? 0 : -ENOSYS; \ 5 void **_data = (d); __u32 *_size = (s); int _ret = (r); \
6 (ret == 0); \ 6 if (_ret == -ENOSYS && *_size == sizeof(m)) { \
7 *_data = NULL; \
8 *_size = _ret = 0; \
9 } \
10 _ret; \
7}) 11})
8 12
9#define nvif_unpack(d,vl,vh,m) ({ \ 13#define nvif_unpack(r,d,s,m,vl,vh,x) ({ \
10 if ((vl) == 0 || ret == -ENOSYS) { \ 14 void **_data = (d); __u32 *_size = (s); \
11 int _size = sizeof(d); \ 15 int _ret = (r), _vl = (vl), _vh = (vh); \
12 if (_size <= size && (d).version >= (vl) && \ 16 if (_ret == -ENOSYS && *_size >= sizeof(m) && \
13 (d).version <= (vh)) { \ 17 (m).version >= _vl && (m).version <= _vh) { \
14 data = (u8 *)data + _size; \ 18 *_data = (__u8 *)*_data + sizeof(m); \
15 size = size - _size; \ 19 *_size = *_size - sizeof(m); \
16 ret = ((m) || !size) ? 0 : -E2BIG; \ 20 if (_ret = 0, !(x)) { \
17 } else { \ 21 _ret = *_size ? -E2BIG : 0; \
18 ret = -ENOSYS; \ 22 *_data = NULL; \
23 *_size = 0; \
19 } \ 24 } \
20 } \ 25 } \
21 (ret == 0); \ 26 _ret; \
22}) 27})
23
24#endif 28#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 8f760002e401..913192c94876 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -159,7 +159,6 @@ struct nvkm_device_func {
159struct nvkm_device_quirk { 159struct nvkm_device_quirk {
160 u8 tv_pin_mask; 160 u8 tv_pin_mask;
161 u8 tv_gpio; 161 u8 tv_gpio;
162 bool War00C800_0;
163}; 162};
164 163
165struct nvkm_device_chip { 164struct nvkm_device_chip {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index 7cc2becabc69..d3bd250103d5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -13,6 +13,8 @@ struct nvbios_perfE {
13 u32 vdec; 13 u32 vdec;
14 u32 disp; 14 u32 disp;
15 u32 script; 15 u32 script;
16 u8 pcie_speed;
17 u8 pcie_width;
16}; 18};
17 19
18u16 nvbios_perf_entry(struct nvkm_bios *, int idx, 20u16 nvbios_perf_entry(struct nvkm_bios *, int idx,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index 8708f0a4e188..6b33bc058924 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -2,6 +2,7 @@
2#define __NVKM_CLK_H__ 2#define __NVKM_CLK_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4#include <core/notify.h> 4#include <core/notify.h>
5#include <subdev/pci.h>
5struct nvbios_pll; 6struct nvbios_pll;
6struct nvkm_pll_vals; 7struct nvkm_pll_vals;
7 8
@@ -38,7 +39,7 @@ enum nv_clk_src {
38 nv_clk_src_hubk06, 39 nv_clk_src_hubk06,
39 nv_clk_src_hubk07, 40 nv_clk_src_hubk07,
40 nv_clk_src_copy, 41 nv_clk_src_copy,
41 nv_clk_src_daemon, 42 nv_clk_src_pmu,
42 nv_clk_src_disp, 43 nv_clk_src_disp,
43 nv_clk_src_vdec, 44 nv_clk_src_vdec,
44 45
@@ -59,6 +60,8 @@ struct nvkm_pstate {
59 struct nvkm_cstate base; 60 struct nvkm_cstate base;
60 u8 pstate; 61 u8 pstate;
61 u8 fanspeed; 62 u8 fanspeed;
63 enum nvkm_pcie_speed pcie_speed;
64 u8 pcie_width;
62}; 65};
63 66
64struct nvkm_domain { 67struct nvkm_domain {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index c4dcd2680fe1..ea23e24a246c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -6,4 +6,5 @@ int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
6int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); 6int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
7int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); 7int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
8int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); 8int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
9int gm204_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
9#endif 10#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 3d4dbbf9aab3..0ffa2ec106d6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -37,4 +37,5 @@ int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
37int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 37int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
38int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 38int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
39int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 39int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
40int gm204_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
40#endif 41#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index fee0a97c44c5..ddb913889d7e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -2,6 +2,12 @@
2#define __NVKM_PCI_H__ 2#define __NVKM_PCI_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5enum nvkm_pcie_speed {
6 NVKM_PCIE_SPEED_2_5,
7 NVKM_PCIE_SPEED_5_0,
8 NVKM_PCIE_SPEED_8_0,
9};
10
5struct nvkm_pci { 11struct nvkm_pci {
6 const struct nvkm_pci_func *func; 12 const struct nvkm_pci_func *func;
7 struct nvkm_subdev subdev; 13 struct nvkm_subdev subdev;
@@ -18,6 +24,11 @@ struct nvkm_pci {
18 bool acquired; 24 bool acquired;
19 } agp; 25 } agp;
20 26
27 struct {
28 enum nvkm_pcie_speed speed;
29 u8 width;
30 } pcie;
31
21 bool msi; 32 bool msi;
22}; 33};
23 34
@@ -34,4 +45,9 @@ int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
34int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 45int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
35int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 46int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
36int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 47int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
48int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
49int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
50
51/* pcie functions */
52int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
37#endif 53#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 7f50cf5f929e..50f52ffe5b0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -25,6 +25,8 @@
25#include <nvif/driver.h> 25#include <nvif/driver.h>
26#include <nvif/ioctl.h> 26#include <nvif/ioctl.h>
27#include <nvif/class.h> 27#include <nvif/class.h>
28#include <nvif/cl0002.h>
29#include <nvif/cla06f.h>
28#include <nvif/unpack.h> 30#include <nvif/unpack.h>
29 31
30#include "nouveau_drm.h" 32#include "nouveau_drm.h"
@@ -87,18 +89,18 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
87{ 89{
88 switch (drm->device.info.family) { 90 switch (drm->device.info.family) {
89 case NV_DEVICE_INFO_V0_TNT: 91 case NV_DEVICE_INFO_V0_TNT:
90 return NVIF_IOCTL_NEW_V0_SW_NV04; 92 return NVIF_CLASS_SW_NV04;
91 case NV_DEVICE_INFO_V0_CELSIUS: 93 case NV_DEVICE_INFO_V0_CELSIUS:
92 case NV_DEVICE_INFO_V0_KELVIN: 94 case NV_DEVICE_INFO_V0_KELVIN:
93 case NV_DEVICE_INFO_V0_RANKINE: 95 case NV_DEVICE_INFO_V0_RANKINE:
94 case NV_DEVICE_INFO_V0_CURIE: 96 case NV_DEVICE_INFO_V0_CURIE:
95 return NVIF_IOCTL_NEW_V0_SW_NV10; 97 return NVIF_CLASS_SW_NV10;
96 case NV_DEVICE_INFO_V0_TESLA: 98 case NV_DEVICE_INFO_V0_TESLA:
97 return NVIF_IOCTL_NEW_V0_SW_NV50; 99 return NVIF_CLASS_SW_NV50;
98 case NV_DEVICE_INFO_V0_FERMI: 100 case NV_DEVICE_INFO_V0_FERMI:
99 case NV_DEVICE_INFO_V0_KEPLER: 101 case NV_DEVICE_INFO_V0_KEPLER:
100 case NV_DEVICE_INFO_V0_MAXWELL: 102 case NV_DEVICE_INFO_V0_MAXWELL:
101 return NVIF_IOCTL_NEW_V0_SW_GF100; 103 return NVIF_CLASS_SW_GF100;
102 } 104 }
103 105
104 return 0x0000; 106 return 0x0000;
@@ -355,9 +357,9 @@ nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
355 } *args = data; 357 } *args = data;
356 struct nouveau_abi16_chan *chan; 358 struct nouveau_abi16_chan *chan;
357 struct nouveau_abi16 *abi16; 359 struct nouveau_abi16 *abi16;
358 int ret; 360 int ret = -ENOSYS;
359 361
360 if (nvif_unpack(args->v0, 0, 0, true)) { 362 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
361 switch (args->v0.type) { 363 switch (args->v0.type) {
362 case NVIF_IOCTL_V0_NEW: 364 case NVIF_IOCTL_V0_NEW:
363 case NVIF_IOCTL_V0_MTHD: 365 case NVIF_IOCTL_V0_MTHD:
@@ -433,10 +435,10 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
433 /* nvsw: compatibility with older 0x*6e class identifier */ 435 /* nvsw: compatibility with older 0x*6e class identifier */
434 for (i = 0; !oclass && i < ret; i++) { 436 for (i = 0; !oclass && i < ret; i++) {
435 switch (sclass[i].oclass) { 437 switch (sclass[i].oclass) {
436 case NVIF_IOCTL_NEW_V0_SW_NV04: 438 case NVIF_CLASS_SW_NV04:
437 case NVIF_IOCTL_NEW_V0_SW_NV10: 439 case NVIF_CLASS_SW_NV10:
438 case NVIF_IOCTL_NEW_V0_SW_NV50: 440 case NVIF_CLASS_SW_NV50:
439 case NVIF_IOCTL_NEW_V0_SW_GF100: 441 case NVIF_CLASS_SW_GF100:
440 oclass = sclass[i].oclass; 442 oclass = sclass[i].oclass;
441 break; 443 break;
442 default: 444 default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 1860f389f21f..3f804a8c590c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -24,6 +24,11 @@
24 24
25#include <nvif/os.h> 25#include <nvif/os.h>
26#include <nvif/class.h> 26#include <nvif/class.h>
27#include <nvif/cl0002.h>
28#include <nvif/cl006b.h>
29#include <nvif/cl506f.h>
30#include <nvif/cl906f.h>
31#include <nvif/cla06f.h>
27#include <nvif/ioctl.h> 32#include <nvif/ioctl.h>
28 33
29/*XXX*/ 34/*XXX*/
@@ -378,7 +383,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
378 /* allocate software object class (used for fences on <= nv05) */ 383 /* allocate software object class (used for fences on <= nv05) */
379 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) { 384 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
380 ret = nvif_object_init(&chan->user, 0x006e, 385 ret = nvif_object_init(&chan->user, 0x006e,
381 NVIF_IOCTL_NEW_V0_SW_NV04, 386 NVIF_CLASS_SW_NV04,
382 NULL, 0, &chan->nvsw); 387 NULL, 0, &chan->nvsw);
383 if (ret) 388 if (ret)
384 return ret; 389 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2e7cbe933533..fcebfae5d426 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -42,6 +42,8 @@
42#include "nouveau_encoder.h" 42#include "nouveau_encoder.h"
43#include "nouveau_crtc.h" 43#include "nouveau_crtc.h"
44 44
45#include <nvif/class.h>
46#include <nvif/cl0046.h>
45#include <nvif/event.h> 47#include <nvif/event.h>
46 48
47MODULE_PARM_DESC(tv_disable, "Disable TV-out detection"); 49MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
@@ -56,6 +58,10 @@ MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
56int nouveau_duallink = 1; 58int nouveau_duallink = 1;
57module_param_named(duallink, nouveau_duallink, int, 0400); 59module_param_named(duallink, nouveau_duallink, int, 0400);
58 60
61MODULE_PARM_DESC(hdmimhz, "Force a maximum HDMI pixel clock (in MHz)");
62int nouveau_hdmimhz = 0;
63module_param_named(hdmimhz, nouveau_hdmimhz, int, 0400);
64
59struct nouveau_encoder * 65struct nouveau_encoder *
60find_encoder(struct drm_connector *connector, int type) 66find_encoder(struct drm_connector *connector, int type)
61{ 67{
@@ -809,12 +815,23 @@ nouveau_connector_get_modes(struct drm_connector *connector)
809} 815}
810 816
811static unsigned 817static unsigned
812get_tmds_link_bandwidth(struct drm_connector *connector) 818get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
813{ 819{
814 struct nouveau_connector *nv_connector = nouveau_connector(connector); 820 struct nouveau_connector *nv_connector = nouveau_connector(connector);
815 struct nouveau_drm *drm = nouveau_drm(connector->dev); 821 struct nouveau_drm *drm = nouveau_drm(connector->dev);
816 struct dcb_output *dcb = nv_connector->detected_encoder->dcb; 822 struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
817 823
824 if (hdmi) {
825 if (nouveau_hdmimhz > 0)
826 return nouveau_hdmimhz * 1000;
827 /* Note: these limits are conservative, some Fermi's
828 * can do 297 MHz. Unclear how this can be determined.
829 */
830 if (drm->device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
831 return 297000;
832 if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
833 return 225000;
834 }
818 if (dcb->location != DCB_LOC_ON_CHIP || 835 if (dcb->location != DCB_LOC_ON_CHIP ||
819 drm->device.info.chipset >= 0x46) 836 drm->device.info.chipset >= 0x46)
820 return 165000; 837 return 165000;
@@ -835,6 +852,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
835 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 852 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
836 unsigned min_clock = 25000, max_clock = min_clock; 853 unsigned min_clock = 25000, max_clock = min_clock;
837 unsigned clock = mode->clock; 854 unsigned clock = mode->clock;
855 bool hdmi;
838 856
839 switch (nv_encoder->dcb->type) { 857 switch (nv_encoder->dcb->type) {
840 case DCB_OUTPUT_LVDS: 858 case DCB_OUTPUT_LVDS:
@@ -847,8 +865,10 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
847 max_clock = 400000; 865 max_clock = 400000;
848 break; 866 break;
849 case DCB_OUTPUT_TMDS: 867 case DCB_OUTPUT_TMDS:
850 max_clock = get_tmds_link_bandwidth(connector); 868 hdmi = drm_detect_hdmi_monitor(nv_connector->edid);
851 if (nouveau_duallink && nv_encoder->dcb->duallink_possible) 869 max_clock = get_tmds_link_bandwidth(connector, hdmi);
870 if (!hdmi && nouveau_duallink &&
871 nv_encoder->dcb->duallink_possible)
852 max_clock *= 2; 872 max_clock *= 2;
853 break; 873 break;
854 case DCB_OUTPUT_ANALOG: 874 case DCB_OUTPUT_ANALOG:
@@ -898,8 +918,6 @@ nouveau_connector_helper_funcs = {
898static const struct drm_connector_funcs 918static const struct drm_connector_funcs
899nouveau_connector_funcs = { 919nouveau_connector_funcs = {
900 .dpms = drm_helper_connector_dpms, 920 .dpms = drm_helper_connector_dpms,
901 .save = NULL,
902 .restore = NULL,
903 .detect = nouveau_connector_detect, 921 .detect = nouveau_connector_detect,
904 .destroy = nouveau_connector_destroy, 922 .destroy = nouveau_connector_destroy,
905 .fill_modes = drm_helper_probe_single_connector_modes, 923 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -910,8 +928,6 @@ nouveau_connector_funcs = {
910static const struct drm_connector_funcs 928static const struct drm_connector_funcs
911nouveau_connector_funcs_lvds = { 929nouveau_connector_funcs_lvds = {
912 .dpms = drm_helper_connector_dpms, 930 .dpms = drm_helper_connector_dpms,
913 .save = NULL,
914 .restore = NULL,
915 .detect = nouveau_connector_detect_lvds, 931 .detect = nouveau_connector_detect_lvds,
916 .destroy = nouveau_connector_destroy, 932 .destroy = nouveau_connector_destroy,
917 .fill_modes = drm_helper_probe_single_connector_modes, 933 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -944,8 +960,6 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
944static const struct drm_connector_funcs 960static const struct drm_connector_funcs
945nouveau_connector_funcs_dp = { 961nouveau_connector_funcs_dp = {
946 .dpms = nouveau_connector_dp_dpms, 962 .dpms = nouveau_connector_dp_dpms,
947 .save = NULL,
948 .restore = NULL,
949 .detect = nouveau_connector_detect, 963 .detect = nouveau_connector_detect,
950 .destroy = nouveau_connector_destroy, 964 .destroy = nouveau_connector_destroy,
951 .fill_modes = drm_helper_probe_single_connector_modes, 965 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -969,10 +983,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
969 983
970 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); 984 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
971 985
986 mutex_lock(&drm->dev->mode_config.mutex);
972 if (plugged) 987 if (plugged)
973 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 988 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
974 else 989 else
975 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 990 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
991 mutex_unlock(&drm->dev->mode_config.mutex);
992
976 drm_helper_hpd_irq_event(connector->dev); 993 drm_helper_hpd_irq_event(connector->dev);
977 } 994 }
978 995
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index f19cb1c5fc5a..863f10b8d818 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -73,6 +73,9 @@ struct nouveau_crtc {
73 int (*set_dither)(struct nouveau_crtc *crtc, bool update); 73 int (*set_dither)(struct nouveau_crtc *crtc, bool update);
74 int (*set_scale)(struct nouveau_crtc *crtc, bool update); 74 int (*set_scale)(struct nouveau_crtc *crtc, bool update);
75 int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update); 75 int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
76
77 void (*save)(struct drm_crtc *crtc);
78 void (*restore)(struct drm_crtc *crtc);
76}; 79};
77 80
78static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc) 81static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 5392e07edfc6..3d0dc199b253 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -28,6 +28,9 @@
28 * Ben Skeggs <bskeggs@redhat.com> 28 * Ben Skeggs <bskeggs@redhat.com>
29 */ 29 */
30 30
31#include <linux/debugfs.h>
32#include <nvif/class.h>
33#include <nvif/if0001.h>
31#include "nouveau_debugfs.h" 34#include "nouveau_debugfs.h"
32#include "nouveau_drm.h" 35#include "nouveau_drm.h"
33 36
@@ -43,22 +46,233 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
43 return 0; 46 return 0;
44} 47}
45 48
49static int
50nouveau_debugfs_pstate_get(struct seq_file *m, void *data)
51{
52 struct drm_info_node *node = (struct drm_info_node *) m->private;
53 struct nouveau_debugfs *debugfs = nouveau_debugfs(node->minor->dev);
54 struct nvif_object *ctrl = &debugfs->ctrl;
55 struct nvif_control_pstate_info_v0 info = {};
56 int ret, i;
57
58 if (!debugfs)
59 return -ENODEV;
60
61 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_INFO, &info, sizeof(info));
62 if (ret)
63 return ret;
64
65 for (i = 0; i < info.count + 1; i++) {
66 const s32 state = i < info.count ? i :
67 NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
68 struct nvif_control_pstate_attr_v0 attr = {
69 .state = state,
70 .index = 0,
71 };
72
73 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_ATTR,
74 &attr, sizeof(attr));
75 if (ret)
76 return ret;
77
78 if (i < info.count)
79 seq_printf(m, "%02x:", attr.state);
80 else
81 seq_printf(m, "%s:", info.pwrsrc == 0 ? "DC" :
82 info.pwrsrc == 1 ? "AC" : "--");
83
84 attr.index = 0;
85 do {
86 attr.state = state;
87 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_ATTR,
88 &attr, sizeof(attr));
89 if (ret)
90 return ret;
91
92 seq_printf(m, " %s %d", attr.name, attr.min);
93 if (attr.min != attr.max)
94 seq_printf(m, "-%d", attr.max);
95 seq_printf(m, " %s", attr.unit);
96 } while (attr.index);
97
98 if (state >= 0) {
99 if (info.ustate_ac == state)
100 seq_printf(m, " AC");
101 if (info.ustate_dc == state)
102 seq_printf(m, " DC");
103 if (info.pstate == state)
104 seq_printf(m, " *");
105 } else {
106 if (info.ustate_ac < -1)
107 seq_printf(m, " AC");
108 if (info.ustate_dc < -1)
109 seq_printf(m, " DC");
110 }
111
112 seq_printf(m, "\n");
113 }
114
115 return 0;
116}
117
118static ssize_t
119nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
120 size_t len, loff_t *offp)
121{
122 struct seq_file *m = file->private_data;
123 struct drm_info_node *node = (struct drm_info_node *) m->private;
124 struct nouveau_debugfs *debugfs = nouveau_debugfs(node->minor->dev);
125 struct nvif_object *ctrl = &debugfs->ctrl;
126 struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
127 char buf[32] = {}, *tmp, *cur = buf;
128 long value, ret;
129
130 if (!debugfs)
131 return -ENODEV;
132
133 if (len >= sizeof(buf))
134 return -EINVAL;
135
136 if (copy_from_user(buf, ubuf, len))
137 return -EFAULT;
138
139 if ((tmp = strchr(buf, '\n')))
140 *tmp = '\0';
141
142 if (!strncasecmp(cur, "dc:", 3)) {
143 args.pwrsrc = 0;
144 cur += 3;
145 } else
146 if (!strncasecmp(cur, "ac:", 3)) {
147 args.pwrsrc = 1;
148 cur += 3;
149 }
150
151 if (!strcasecmp(cur, "none"))
152 args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
153 else
154 if (!strcasecmp(cur, "auto"))
155 args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
156 else {
157 ret = kstrtol(cur, 16, &value);
158 if (ret)
159 return ret;
160 args.ustate = value;
161 }
162
163 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
164 if (ret < 0)
165 return ret;
166
167 return len;
168}
169
170static int
171nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
172{
173 return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
174}
175
176static const struct file_operations nouveau_pstate_fops = {
177 .owner = THIS_MODULE,
178 .open = nouveau_debugfs_pstate_open,
179 .read = seq_read,
180 .write = nouveau_debugfs_pstate_set,
181};
182
46static struct drm_info_list nouveau_debugfs_list[] = { 183static struct drm_info_list nouveau_debugfs_list[] = {
47 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, 184 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
48}; 185};
49#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) 186#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
50 187
51int 188static const struct nouveau_debugfs_files {
52nouveau_debugfs_init(struct drm_minor *minor) 189 const char *name;
190 const struct file_operations *fops;
191} nouveau_debugfs_files[] = {
192 {"pstate", &nouveau_pstate_fops},
193};
194
195static int
196nouveau_debugfs_create_file(struct drm_minor *minor,
197 const struct nouveau_debugfs_files *ndf)
53{ 198{
54 drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, 199 struct drm_info_node *node;
55 minor->debugfs_root, minor); 200
201 node = kmalloc(sizeof(*node), GFP_KERNEL);
202 if (node == NULL)
203 return -ENOMEM;
204
205 node->minor = minor;
206 node->info_ent = (const void *)ndf->fops;
207 node->dent = debugfs_create_file(ndf->name, S_IRUGO | S_IWUSR,
208 minor->debugfs_root, node, ndf->fops);
209 if (!node->dent) {
210 kfree(node);
211 return -ENOMEM;
212 }
213
214 mutex_lock(&minor->debugfs_lock);
215 list_add(&node->list, &minor->debugfs_list);
216 mutex_unlock(&minor->debugfs_lock);
56 return 0; 217 return 0;
57} 218}
58 219
220int
221nouveau_drm_debugfs_init(struct drm_minor *minor)
222{
223 int i, ret;
224
225 for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
226 ret = nouveau_debugfs_create_file(minor,
227 &nouveau_debugfs_files[i]);
228
229 if (ret)
230 return ret;
231 }
232
233 return drm_debugfs_create_files(nouveau_debugfs_list,
234 NOUVEAU_DEBUGFS_ENTRIES,
235 minor->debugfs_root, minor);
236}
237
59void 238void
60nouveau_debugfs_takedown(struct drm_minor *minor) 239nouveau_drm_debugfs_cleanup(struct drm_minor *minor)
61{ 240{
241 int i;
242
62 drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, 243 drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
63 minor); 244 minor);
245
246 for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
247 drm_debugfs_remove_files((struct drm_info_list *)
248 nouveau_debugfs_files[i].fops,
249 1, minor);
250 }
251}
252
253int
254nouveau_debugfs_init(struct nouveau_drm *drm)
255{
256 int ret;
257
258 drm->debugfs = kzalloc(sizeof(*drm->debugfs), GFP_KERNEL);
259 if (!drm->debugfs)
260 return -ENOMEM;
261
262 ret = nvif_object_init(&drm->device.object, 0, NVIF_CLASS_CONTROL,
263 NULL, 0, &drm->debugfs->ctrl);
264 if (ret)
265 return ret;
266
267 return 0;
268}
269
270void
271nouveau_debugfs_fini(struct nouveau_drm *drm)
272{
273 if (drm->debugfs && drm->debugfs->ctrl.priv)
274 nvif_object_fini(&drm->debugfs->ctrl);
275
276 kfree(drm->debugfs);
277 drm->debugfs = NULL;
64} 278}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index a62af6fb5f99..b8c03ff5bf05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -4,16 +4,43 @@
4#include <drm/drmP.h> 4#include <drm/drmP.h>
5 5
6#if defined(CONFIG_DEBUG_FS) 6#if defined(CONFIG_DEBUG_FS)
7extern int nouveau_debugfs_init(struct drm_minor *); 7
8extern void nouveau_debugfs_takedown(struct drm_minor *); 8#include "nouveau_drm.h"
9
10struct nouveau_debugfs {
11 struct nvif_object ctrl;
12};
13
14static inline struct nouveau_debugfs *
15nouveau_debugfs(struct drm_device *dev)
16{
17 return nouveau_drm(dev)->debugfs;
18}
19
20extern int nouveau_drm_debugfs_init(struct drm_minor *);
21extern void nouveau_drm_debugfs_cleanup(struct drm_minor *);
22extern int nouveau_debugfs_init(struct nouveau_drm *);
23extern void nouveau_debugfs_fini(struct nouveau_drm *);
9#else 24#else
10static inline int 25static inline int
11nouveau_debugfs_init(struct drm_minor *minor) 26nouveau_drm_debugfs_init(struct drm_minor *minor)
12{ 27{
13 return 0; 28 return 0;
14} 29}
15 30
16static inline void nouveau_debugfs_takedown(struct drm_minor *minor) 31static inline void
32nouveau_drm_debugfs_cleanup(struct drm_minor *minor)
33{
34}
35
36static inline int
37nouveau_debugfs_init(struct nouveau_drm *drm)
38{
39 return 0;
40}
41
42static inline void
43nouveau_debugfs_fini(struct nouveau_drm *drm)
17{ 44{
18} 45}
19 46
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 18676b8c1721..24be27d3cd18 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -39,6 +39,7 @@
39 39
40#include "nouveau_fence.h" 40#include "nouveau_fence.h"
41 41
42#include <nvif/cl0046.h>
42#include <nvif/event.h> 43#include <nvif/event.h>
43 44
44static int 45static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1d3ee5179ab8..2f2f252e3fb6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,12 +37,16 @@
37#include <core/pci.h> 37#include <core/pci.h>
38#include <core/tegra.h> 38#include <core/tegra.h>
39 39
40#include <nvif/class.h>
41#include <nvif/cl0002.h>
42#include <nvif/cla06f.h>
43#include <nvif/if0004.h>
44
40#include "nouveau_drm.h" 45#include "nouveau_drm.h"
41#include "nouveau_dma.h" 46#include "nouveau_dma.h"
42#include "nouveau_ttm.h" 47#include "nouveau_ttm.h"
43#include "nouveau_gem.h" 48#include "nouveau_gem.h"
44#include "nouveau_vga.h" 49#include "nouveau_vga.h"
45#include "nouveau_sysfs.h"
46#include "nouveau_hwmon.h" 50#include "nouveau_hwmon.h"
47#include "nouveau_acpi.h" 51#include "nouveau_acpi.h"
48#include "nouveau_bios.h" 52#include "nouveau_bios.h"
@@ -256,8 +260,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
256 } 260 }
257 261
258 ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete, 262 ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
259 false, NVSW_NTFY_UEVENT, NULL, 0, 0, 263 false, NV04_NVSW_NTFY_UEVENT,
260 &drm->flip); 264 NULL, 0, 0, &drm->flip);
261 if (ret == 0) 265 if (ret == 0)
262 ret = nvif_notify_get(&drm->flip); 266 ret = nvif_notify_get(&drm->flip);
263 if (ret) { 267 if (ret) {
@@ -448,7 +452,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
448 goto fail_dispinit; 452 goto fail_dispinit;
449 } 453 }
450 454
451 nouveau_sysfs_init(dev); 455 nouveau_debugfs_init(drm);
452 nouveau_hwmon_init(dev); 456 nouveau_hwmon_init(dev);
453 nouveau_accel_init(drm); 457 nouveau_accel_init(drm);
454 nouveau_fbcon_init(dev); 458 nouveau_fbcon_init(dev);
@@ -486,7 +490,7 @@ nouveau_drm_unload(struct drm_device *dev)
486 nouveau_fbcon_fini(dev); 490 nouveau_fbcon_fini(dev);
487 nouveau_accel_fini(drm); 491 nouveau_accel_fini(drm);
488 nouveau_hwmon_fini(dev); 492 nouveau_hwmon_fini(dev);
489 nouveau_sysfs_fini(dev); 493 nouveau_debugfs_fini(drm);
490 494
491 if (dev->mode_config.num_crtc) 495 if (dev->mode_config.num_crtc)
492 nouveau_display_fini(dev); 496 nouveau_display_fini(dev);
@@ -928,8 +932,8 @@ driver_stub = {
928 .lastclose = nouveau_vga_lastclose, 932 .lastclose = nouveau_vga_lastclose,
929 933
930#if defined(CONFIG_DEBUG_FS) 934#if defined(CONFIG_DEBUG_FS)
931 .debugfs_init = nouveau_debugfs_init, 935 .debugfs_init = nouveau_drm_debugfs_init,
932 .debugfs_cleanup = nouveau_debugfs_takedown, 936 .debugfs_cleanup = nouveau_drm_debugfs_cleanup,
933#endif 937#endif
934 938
935 .get_vblank_counter = drm_vblank_no_hw_counter, 939 .get_vblank_counter = drm_vblank_no_hw_counter,
@@ -1003,7 +1007,6 @@ static void nouveau_display_options(void)
1003 DRM_DEBUG_DRIVER("... modeset : %d\n", nouveau_modeset); 1007 DRM_DEBUG_DRIVER("... modeset : %d\n", nouveau_modeset);
1004 DRM_DEBUG_DRIVER("... runpm : %d\n", nouveau_runtime_pm); 1008 DRM_DEBUG_DRIVER("... runpm : %d\n", nouveau_runtime_pm);
1005 DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf); 1009 DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
1006 DRM_DEBUG_DRIVER("... pstate : %d\n", nouveau_pstate);
1007} 1010}
1008 1011
1009static const struct dev_pm_ops nouveau_pm_ops = { 1012static const struct dev_pm_ops nouveau_pm_ops = {
@@ -1046,10 +1049,6 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
1046 goto err_free; 1049 goto err_free;
1047 } 1050 }
1048 1051
1049 err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));
1050 if (err < 0)
1051 goto err_free;
1052
1053 drm->platformdev = pdev; 1052 drm->platformdev = pdev;
1054 platform_set_drvdata(pdev, drm); 1053 platform_set_drvdata(pdev, drm);
1055 1054
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a02813e994ec..5c363ed1c842 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -164,7 +164,7 @@ struct nouveau_drm {
164 164
165 /* power management */ 165 /* power management */
166 struct nouveau_hwmon *hwmon; 166 struct nouveau_hwmon *hwmon;
167 struct nouveau_sysfs *sysfs; 167 struct nouveau_debugfs *debugfs;
168 168
169 /* display power reference */ 169 /* display power reference */
170 bool have_disp_power_ref; 170 bool have_disp_power_ref;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index b37da95105b0..ee6a6d3fc80f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -63,6 +63,9 @@ struct nouveau_encoder {
63 u32 datarate; 63 u32 datarate;
64 } dp; 64 } dp;
65 }; 65 };
66
67 void (*enc_save)(struct drm_encoder *encoder);
68 void (*enc_restore)(struct drm_encoder *encoder);
66}; 69};
67 70
68struct nouveau_encoder * 71struct nouveau_encoder *
@@ -80,7 +83,7 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
80 return &enc->base.base; 83 return &enc->base.base;
81} 84}
82 85
83static inline struct drm_encoder_slave_funcs * 86static inline const struct drm_encoder_slave_funcs *
84get_slave_funcs(struct drm_encoder *enc) 87get_slave_funcs(struct drm_encoder *enc)
85{ 88{
86 return to_encoder_slave(enc)->slave_funcs; 89 return to_encoder_slave(enc)->slave_funcs;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 574c36b492ee..9a8c5b727f59 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -30,6 +30,7 @@
30#include <linux/hrtimer.h> 30#include <linux/hrtimer.h>
31#include <trace/events/fence.h> 31#include <trace/events/fence.h>
32 32
33#include <nvif/cl826e.h>
33#include <nvif/notify.h> 34#include <nvif/notify.h>
34#include <nvif/event.h> 35#include <nvif/event.h>
35 36
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 491c7149d197..8e13467d0ddb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -34,6 +34,8 @@
34#include "nouveau_drm.h" 34#include "nouveau_drm.h"
35#include "nouveau_hwmon.h" 35#include "nouveau_hwmon.h"
36 36
37#include <nvkm/subdev/volt.h>
38
37#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 39#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
38static ssize_t 40static ssize_t
39nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 41nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
@@ -512,6 +514,35 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
512 nouveau_hwmon_get_pwm1_max, 514 nouveau_hwmon_get_pwm1_max,
513 nouveau_hwmon_set_pwm1_max, 0); 515 nouveau_hwmon_set_pwm1_max, 0);
514 516
517static ssize_t
518nouveau_hwmon_get_in0_input(struct device *d,
519 struct device_attribute *a, char *buf)
520{
521 struct drm_device *dev = dev_get_drvdata(d);
522 struct nouveau_drm *drm = nouveau_drm(dev);
523 struct nvkm_volt *volt = nvxx_volt(&drm->device);
524 int ret;
525
526 ret = nvkm_volt_get(volt);
527 if (ret < 0)
528 return ret;
529
530 return sprintf(buf, "%i\n", ret / 1000);
531}
532
533static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
534 nouveau_hwmon_get_in0_input, NULL, 0);
535
536static ssize_t
537nouveau_hwmon_get_in0_label(struct device *d,
538 struct device_attribute *a, char *buf)
539{
540 return sprintf(buf, "GPU core\n");
541}
542
543static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO,
544 nouveau_hwmon_get_in0_label, NULL, 0);
545
515static struct attribute *hwmon_default_attributes[] = { 546static struct attribute *hwmon_default_attributes[] = {
516 &sensor_dev_attr_name.dev_attr.attr, 547 &sensor_dev_attr_name.dev_attr.attr,
517 &sensor_dev_attr_update_rate.dev_attr.attr, 548 &sensor_dev_attr_update_rate.dev_attr.attr,
@@ -542,6 +573,12 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
542 NULL 573 NULL
543}; 574};
544 575
576static struct attribute *hwmon_in0_attributes[] = {
577 &sensor_dev_attr_in0_input.dev_attr.attr,
578 &sensor_dev_attr_in0_label.dev_attr.attr,
579 NULL
580};
581
545static const struct attribute_group hwmon_default_attrgroup = { 582static const struct attribute_group hwmon_default_attrgroup = {
546 .attrs = hwmon_default_attributes, 583 .attrs = hwmon_default_attributes,
547}; 584};
@@ -554,6 +591,9 @@ static const struct attribute_group hwmon_fan_rpm_attrgroup = {
554static const struct attribute_group hwmon_pwm_fan_attrgroup = { 591static const struct attribute_group hwmon_pwm_fan_attrgroup = {
555 .attrs = hwmon_pwm_fan_attributes, 592 .attrs = hwmon_pwm_fan_attributes,
556}; 593};
594static const struct attribute_group hwmon_in0_attrgroup = {
595 .attrs = hwmon_in0_attributes,
596};
557#endif 597#endif
558 598
559int 599int
@@ -562,6 +602,7 @@ nouveau_hwmon_init(struct drm_device *dev)
562#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 602#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
563 struct nouveau_drm *drm = nouveau_drm(dev); 603 struct nouveau_drm *drm = nouveau_drm(dev);
564 struct nvkm_therm *therm = nvxx_therm(&drm->device); 604 struct nvkm_therm *therm = nvxx_therm(&drm->device);
605 struct nvkm_volt *volt = nvxx_volt(&drm->device);
565 struct nouveau_hwmon *hwmon; 606 struct nouveau_hwmon *hwmon;
566 struct device *hwmon_dev; 607 struct device *hwmon_dev;
567 int ret = 0; 608 int ret = 0;
@@ -613,6 +654,14 @@ nouveau_hwmon_init(struct drm_device *dev)
613 goto error; 654 goto error;
614 } 655 }
615 656
657 if (volt && nvkm_volt_get(volt) >= 0) {
658 ret = sysfs_create_group(&hwmon_dev->kobj,
659 &hwmon_in0_attrgroup);
660
661 if (ret)
662 goto error;
663 }
664
616 hwmon->hwmon = hwmon_dev; 665 hwmon->hwmon = hwmon_dev;
617 666
618 return 0; 667 return 0;
@@ -638,6 +687,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
638 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_temp_attrgroup); 687 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_temp_attrgroup);
639 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup); 688 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
640 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup); 689 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
690 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup);
641 691
642 hwmon_device_unregister(hwmon->hwmon); 692 hwmon_device_unregister(hwmon->hwmon);
643 } 693 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 60e32c4e4e49..8a70cec59bcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -78,3 +78,14 @@ struct platform_driver nouveau_platform_driver = {
78 .probe = nouveau_platform_probe, 78 .probe = nouveau_platform_probe,
79 .remove = nouveau_platform_remove, 79 .remove = nouveau_platform_remove,
80}; 80};
81
82#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
83MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
84MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
85MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
86MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
87MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
88MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
89MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
90MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
91#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
deleted file mode 100644
index 5dac3546c1b8..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ /dev/null
@@ -1,197 +0,0 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <nvif/os.h>
26#include <nvif/class.h>
27#include <nvif/ioctl.h>
28
29#include "nouveau_sysfs.h"
30
31MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future");
32int nouveau_pstate;
33module_param_named(pstate, nouveau_pstate, int, 0400);
34
35static inline struct drm_device *
36drm_device(struct device *d)
37{
38 return dev_get_drvdata(d);
39}
40
41#define snappendf(p,r,f,a...) do { \
42 snprintf(p, r, f, ##a); \
43 r -= strlen(p); \
44 p += strlen(p); \
45} while(0)
46
47static ssize_t
48nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
49{
50 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
51 struct nvif_control_pstate_info_v0 info = {};
52 size_t cnt = PAGE_SIZE;
53 char *buf = b;
54 int ret, i;
55
56 ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_INFO,
57 &info, sizeof(info));
58 if (ret)
59 return ret;
60
61 for (i = 0; i < info.count + 1; i++) {
62 const s32 state = i < info.count ? i :
63 NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
64 struct nvif_control_pstate_attr_v0 attr = {
65 .state = state,
66 .index = 0,
67 };
68
69 ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR,
70 &attr, sizeof(attr));
71 if (ret)
72 return ret;
73
74 if (i < info.count)
75 snappendf(buf, cnt, "%02x:", attr.state);
76 else
77 snappendf(buf, cnt, "%s:", info.pwrsrc == 0 ? "DC" :
78 info.pwrsrc == 1 ? "AC" :
79 "--");
80
81 attr.index = 0;
82 do {
83 attr.state = state;
84 ret = nvif_mthd(&sysfs->ctrl,
85 NVIF_CONTROL_PSTATE_ATTR,
86 &attr, sizeof(attr));
87 if (ret)
88 return ret;
89
90 snappendf(buf, cnt, " %s %d", attr.name, attr.min);
91 if (attr.min != attr.max)
92 snappendf(buf, cnt, "-%d", attr.max);
93 snappendf(buf, cnt, " %s", attr.unit);
94 } while (attr.index);
95
96 if (state >= 0) {
97 if (info.ustate_ac == state)
98 snappendf(buf, cnt, " AC");
99 if (info.ustate_dc == state)
100 snappendf(buf, cnt, " DC");
101 if (info.pstate == state)
102 snappendf(buf, cnt, " *");
103 } else {
104 if (info.ustate_ac < -1)
105 snappendf(buf, cnt, " AC");
106 if (info.ustate_dc < -1)
107 snappendf(buf, cnt, " DC");
108 }
109
110 snappendf(buf, cnt, "\n");
111 }
112
113 return strlen(b);
114}
115
116static ssize_t
117nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
118 const char *buf, size_t count)
119{
120 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
121 struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
122 long value, ret;
123 char *tmp;
124
125 if ((tmp = strchr(buf, '\n')))
126 *tmp = '\0';
127
128 if (!strncasecmp(buf, "dc:", 3)) {
129 args.pwrsrc = 0;
130 buf += 3;
131 } else
132 if (!strncasecmp(buf, "ac:", 3)) {
133 args.pwrsrc = 1;
134 buf += 3;
135 }
136
137 if (!strcasecmp(buf, "none"))
138 args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
139 else
140 if (!strcasecmp(buf, "auto"))
141 args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
142 else {
143 ret = kstrtol(buf, 16, &value);
144 if (ret)
145 return ret;
146 args.ustate = value;
147 }
148
149 ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_USER,
150 &args, sizeof(args));
151 if (ret < 0)
152 return ret;
153
154 return count;
155}
156
157static DEVICE_ATTR(pstate, S_IRUGO | S_IWUSR,
158 nouveau_sysfs_pstate_get, nouveau_sysfs_pstate_set);
159
160void
161nouveau_sysfs_fini(struct drm_device *dev)
162{
163 struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
164 struct nouveau_drm *drm = nouveau_drm(dev);
165 struct nvif_device *device = &drm->device;
166
167 if (sysfs && sysfs->ctrl.priv) {
168 device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
169 nvif_object_fini(&sysfs->ctrl);
170 }
171
172 drm->sysfs = NULL;
173 kfree(sysfs);
174}
175
176int
177nouveau_sysfs_init(struct drm_device *dev)
178{
179 struct nouveau_drm *drm = nouveau_drm(dev);
180 struct nvif_device *device = &drm->device;
181 struct nouveau_sysfs *sysfs;
182 int ret;
183
184 if (!nouveau_pstate)
185 return 0;
186
187 sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
188 if (!sysfs)
189 return -ENOMEM;
190
191 ret = nvif_object_init(&device->object, 0, NVIF_IOCTL_NEW_V0_CONTROL,
192 NULL, 0, &sysfs->ctrl);
193 if (ret == 0)
194 device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
195
196 return 0;
197}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
deleted file mode 100644
index 4e5ea9241b28..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __NOUVEAU_SYSFS_H__
2#define __NOUVEAU_SYSFS_H__
3
4#include "nouveau_drm.h"
5
6struct nouveau_sysfs {
7 struct nvif_object ctrl;
8};
9
10static inline struct nouveau_sysfs *
11nouveau_sysfs(struct drm_device *dev)
12{
13 return nouveau_drm(dev)->sysfs;
14}
15
16int nouveau_sysfs_init(struct drm_device *);
17void nouveau_sysfs_fini(struct drm_device *);
18
19extern int nouveau_pstate;
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 6ae1b3494bcd..e9f52ef0be83 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -130,20 +130,21 @@ usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
130 struct nvif_notify_req_v0 v0; 130 struct nvif_notify_req_v0 v0;
131 } *req; 131 } *req;
132 struct usif_notify *ntfy; 132 struct usif_notify *ntfy;
133 int ret; 133 int ret = -ENOSYS;
134 134
135 if (nvif_unpack(args->v0, 0, 0, true)) { 135 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
136 if (usif_notify_find(f, args->v0.index)) 136 if (usif_notify_find(f, args->v0.index))
137 return -EEXIST; 137 return -EEXIST;
138 } else 138 } else
139 return ret; 139 return ret;
140 req = data; 140 req = data;
141 ret = -ENOSYS;
141 142
142 if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL))) 143 if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL)))
143 return -ENOMEM; 144 return -ENOMEM;
144 atomic_set(&ntfy->enabled, 0); 145 atomic_set(&ntfy->enabled, 0);
145 146
146 if (nvif_unpack(req->v0, 0, 0, true)) { 147 if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) {
147 ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply; 148 ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply;
148 ntfy->route = req->v0.route; 149 ntfy->route = req->v0.route;
149 ntfy->token = req->v0.token; 150 ntfy->token = req->v0.token;
@@ -171,9 +172,9 @@ usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
171 struct nvif_ioctl_ntfy_del_v0 v0; 172 struct nvif_ioctl_ntfy_del_v0 v0;
172 } *args = data; 173 } *args = data;
173 struct usif_notify *ntfy; 174 struct usif_notify *ntfy;
174 int ret; 175 int ret = -ENOSYS;
175 176
176 if (nvif_unpack(args->v0, 0, 0, true)) { 177 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
177 if (!(ntfy = usif_notify_find(f, args->v0.index))) 178 if (!(ntfy = usif_notify_find(f, args->v0.index)))
178 return -ENOENT; 179 return -ENOENT;
179 } else 180 } else
@@ -194,9 +195,9 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
194 struct nvif_ioctl_ntfy_del_v0 v0; 195 struct nvif_ioctl_ntfy_del_v0 v0;
195 } *args = data; 196 } *args = data;
196 struct usif_notify *ntfy; 197 struct usif_notify *ntfy;
197 int ret; 198 int ret = -ENOSYS;
198 199
199 if (nvif_unpack(args->v0, 0, 0, true)) { 200 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
200 if (!(ntfy = usif_notify_find(f, args->v0.index))) 201 if (!(ntfy = usif_notify_find(f, args->v0.index)))
201 return -ENOENT; 202 return -ENOENT;
202 } else 203 } else
@@ -233,9 +234,9 @@ usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
233 struct nvif_ioctl_ntfy_put_v0 v0; 234 struct nvif_ioctl_ntfy_put_v0 v0;
234 } *args = data; 235 } *args = data;
235 struct usif_notify *ntfy; 236 struct usif_notify *ntfy;
236 int ret; 237 int ret = -ENOSYS;
237 238
238 if (nvif_unpack(args->v0, 0, 0, true)) { 239 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
239 if (!(ntfy = usif_notify_find(f, args->v0.index))) 240 if (!(ntfy = usif_notify_find(f, args->v0.index)))
240 return -ENOENT; 241 return -ENOENT;
241 } else 242 } else
@@ -270,13 +271,13 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
270 struct nvif_ioctl_new_v0 v0; 271 struct nvif_ioctl_new_v0 v0;
271 } *args = data; 272 } *args = data;
272 struct usif_object *object; 273 struct usif_object *object;
273 int ret; 274 int ret = -ENOSYS;
274 275
275 if (!(object = kmalloc(sizeof(*object), GFP_KERNEL))) 276 if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
276 return -ENOMEM; 277 return -ENOMEM;
277 list_add(&object->head, &cli->objects); 278 list_add(&object->head, &cli->objects);
278 279
279 if (nvif_unpack(args->v0, 0, 0, true)) { 280 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
280 object->route = args->v0.route; 281 object->route = args->v0.route;
281 object->token = args->v0.token; 282 object->token = args->v0.token;
282 args->v0.route = NVDRM_OBJECT_USIF; 283 args->v0.route = NVDRM_OBJECT_USIF;
@@ -310,7 +311,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
310 if (ret = -EFAULT, copy_from_user(argv, user, size)) 311 if (ret = -EFAULT, copy_from_user(argv, user, size))
311 goto done; 312 goto done;
312 313
313 if (nvif_unpack(argv->v0, 0, 0, true)) { 314 if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
314 /* block access to objects not created via this interface */ 315 /* block access to objects not created via this interface */
315 owner = argv->v0.owner; 316 owner = argv->v0.owner;
316 if (argv->v0.object == 0ULL) 317 if (argv->v0.object == 0ULL)
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index f3d705d67738..3022d24ed88b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -26,6 +26,8 @@
26#include "nouveau_dma.h" 26#include "nouveau_dma.h"
27#include "nouveau_fence.h" 27#include "nouveau_fence.h"
28 28
29#include <nvif/if0004.h>
30
29struct nv04_fence_chan { 31struct nv04_fence_chan {
30 struct nouveau_fence_chan base; 32 struct nouveau_fence_chan base;
31}; 33};
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 80b6eb8b3d02..6a141c9bf5b7 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -24,6 +24,7 @@
24 24
25#include <nvif/os.h> 25#include <nvif/os.h>
26#include <nvif/class.h> 26#include <nvif/class.h>
27#include <nvif/cl0002.h>
27 28
28#include "nouveau_drm.h" 29#include "nouveau_drm.h"
29#include "nouveau_dma.h" 30#include "nouveau_dma.h"
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index c053c50b346a..ea3921652449 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -28,8 +28,16 @@
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29#include <drm/drm_plane_helper.h> 29#include <drm/drm_plane_helper.h>
30#include <drm/drm_dp_helper.h> 30#include <drm/drm_dp_helper.h>
31#include <drm/drm_fb_helper.h>
31 32
32#include <nvif/class.h> 33#include <nvif/class.h>
34#include <nvif/cl0002.h>
35#include <nvif/cl5070.h>
36#include <nvif/cl507a.h>
37#include <nvif/cl507b.h>
38#include <nvif/cl507c.h>
39#include <nvif/cl507d.h>
40#include <nvif/cl507e.h>
33 41
34#include "nouveau_drm.h" 42#include "nouveau_drm.h"
35#include "nouveau_dma.h" 43#include "nouveau_dma.h"
@@ -773,7 +781,6 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
773 */ 781 */
774 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || 782 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
775 (nv_connector->underscan == UNDERSCAN_AUTO && 783 (nv_connector->underscan == UNDERSCAN_AUTO &&
776 nv_connector->edid &&
777 drm_detect_hdmi_monitor(nv_connector->edid)))) { 784 drm_detect_hdmi_monitor(nv_connector->edid)))) {
778 u32 bX = nv_connector->underscan_hborder; 785 u32 bX = nv_connector->underscan_hborder;
779 u32 bY = nv_connector->underscan_vborder; 786 u32 bY = nv_connector->underscan_vborder;
@@ -1717,7 +1724,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1717 encoder = to_drm_encoder(nv_encoder); 1724 encoder = to_drm_encoder(nv_encoder);
1718 encoder->possible_crtcs = dcbe->heads; 1725 encoder->possible_crtcs = dcbe->heads;
1719 encoder->possible_clones = 0; 1726 encoder->possible_clones = 0;
1720 drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type); 1727 drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL);
1721 drm_encoder_helper_add(encoder, &nv50_dac_hfunc); 1728 drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
1722 1729
1723 drm_mode_connector_attach_encoder(connector, encoder); 1730 drm_mode_connector_attach_encoder(connector, encoder);
@@ -1961,10 +1968,17 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1961 switch (nv_encoder->dcb->type) { 1968 switch (nv_encoder->dcb->type) {
1962 case DCB_OUTPUT_TMDS: 1969 case DCB_OUTPUT_TMDS:
1963 if (nv_encoder->dcb->sorconf.link & 1) { 1970 if (nv_encoder->dcb->sorconf.link & 1) {
1964 if (mode->clock < 165000) 1971 proto = 0x1;
1965 proto = 0x1; 1972 /* Only enable dual-link if:
1966 else 1973 * - Need to (i.e. rate > 165MHz)
1967 proto = 0x5; 1974 * - DCB says we can
1975 * - Not an HDMI monitor, since there's no dual-link
1976 * on HDMI.
1977 */
1978 if (mode->clock >= 165000 &&
1979 nv_encoder->dcb->duallink_possible &&
1980 !drm_detect_hdmi_monitor(nv_connector->edid))
1981 proto |= 0x4;
1968 } else { 1982 } else {
1969 proto = 0x2; 1983 proto = 0x2;
1970 } 1984 }
@@ -2125,7 +2139,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
2125 encoder = to_drm_encoder(nv_encoder); 2139 encoder = to_drm_encoder(nv_encoder);
2126 encoder->possible_crtcs = dcbe->heads; 2140 encoder->possible_crtcs = dcbe->heads;
2127 encoder->possible_clones = 0; 2141 encoder->possible_clones = 0;
2128 drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type); 2142 drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL);
2129 drm_encoder_helper_add(encoder, &nv50_sor_hfunc); 2143 drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
2130 2144
2131 drm_mode_connector_attach_encoder(connector, encoder); 2145 drm_mode_connector_attach_encoder(connector, encoder);
@@ -2305,7 +2319,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
2305 encoder = to_drm_encoder(nv_encoder); 2319 encoder = to_drm_encoder(nv_encoder);
2306 encoder->possible_crtcs = dcbe->heads; 2320 encoder->possible_crtcs = dcbe->heads;
2307 encoder->possible_clones = 0; 2321 encoder->possible_clones = 0;
2308 drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type); 2322 drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL);
2309 drm_encoder_helper_add(encoder, &nv50_pior_hfunc); 2323 drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
2310 2324
2311 drm_mode_connector_attach_encoder(connector, encoder); 2325 drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f0d96e5da6b4..3695ccce68c7 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -24,6 +24,7 @@
24 24
25#include <nvif/os.h> 25#include <nvif/os.h>
26#include <nvif/class.h> 26#include <nvif/class.h>
27#include <nvif/cl0002.h>
27 28
28#include "nouveau_drm.h" 29#include "nouveau_drm.h"
29#include "nouveau_dma.h" 30#include "nouveau_dma.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 297e1e953fa6..e1943910858e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -28,6 +28,7 @@
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/event.h> 30#include <nvif/event.h>
31#include <nvif/if0000.h>
31#include <nvif/unpack.h> 32#include <nvif/unpack.h>
32 33
33struct nvkm_client_notify { 34struct nvkm_client_notify {
@@ -96,7 +97,7 @@ nvkm_client_notify_new(struct nvkm_object *object,
96 struct nvif_notify_req_v0 v0; 97 struct nvif_notify_req_v0 v0;
97 } *req = data; 98 } *req = data;
98 u8 index, reply; 99 u8 index, reply;
99 int ret; 100 int ret = -ENOSYS;
100 101
101 for (index = 0; index < ARRAY_SIZE(client->notify); index++) { 102 for (index = 0; index < ARRAY_SIZE(client->notify); index++) {
102 if (!client->notify[index]) 103 if (!client->notify[index])
@@ -111,7 +112,7 @@ nvkm_client_notify_new(struct nvkm_object *object,
111 return -ENOMEM; 112 return -ENOMEM;
112 113
113 nvif_ioctl(object, "notify new size %d\n", size); 114 nvif_ioctl(object, "notify new size %d\n", size);
114 if (nvif_unpack(req->v0, 0, 0, true)) { 115 if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) {
115 nvif_ioctl(object, "notify new vers %d reply %d route %02x " 116 nvif_ioctl(object, "notify new vers %d reply %d route %02x "
116 "token %llx\n", req->v0.version, 117 "token %llx\n", req->v0.version,
117 req->v0.reply, req->v0.route, req->v0.token); 118 req->v0.reply, req->v0.route, req->v0.token);
@@ -143,10 +144,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
143 union { 144 union {
144 struct nv_client_devlist_v0 v0; 145 struct nv_client_devlist_v0 v0;
145 } *args = data; 146 } *args = data;
146 int ret; 147 int ret = -ENOSYS;
147 148
148 nvif_ioctl(object, "client devlist size %d\n", size); 149 nvif_ioctl(object, "client devlist size %d\n", size);
149 if (nvif_unpack(args->v0, 0, 0, true)) { 150 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
150 nvif_ioctl(object, "client devlist vers %d count %d\n", 151 nvif_ioctl(object, "client devlist vers %d count %d\n",
151 args->v0.version, args->v0.count); 152 args->v0.version, args->v0.count);
152 if (size == sizeof(args->v0.device[0]) * args->v0.count) { 153 if (size == sizeof(args->v0.device[0]) * args->v0.count) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index d87d6ab03cc7..b0db51847c36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -34,10 +34,10 @@ nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
34 union { 34 union {
35 struct nvif_ioctl_nop_v0 v0; 35 struct nvif_ioctl_nop_v0 v0;
36 } *args = data; 36 } *args = data;
37 int ret; 37 int ret = -ENOSYS;
38 38
39 nvif_ioctl(object, "nop size %d\n", size); 39 nvif_ioctl(object, "nop size %d\n", size);
40 if (nvif_unpack(args->v0, 0, 0, false)) { 40 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
41 nvif_ioctl(object, "nop vers %lld\n", args->v0.version); 41 nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
42 args->v0.version = NVIF_VERSION_LATEST; 42 args->v0.version = NVIF_VERSION_LATEST;
43 } 43 }
@@ -52,10 +52,10 @@ nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
52 struct nvif_ioctl_sclass_v0 v0; 52 struct nvif_ioctl_sclass_v0 v0;
53 } *args = data; 53 } *args = data;
54 struct nvkm_oclass oclass; 54 struct nvkm_oclass oclass;
55 int ret, i = 0; 55 int ret = -ENOSYS, i = 0;
56 56
57 nvif_ioctl(object, "sclass size %d\n", size); 57 nvif_ioctl(object, "sclass size %d\n", size);
58 if (nvif_unpack(args->v0, 0, 0, true)) { 58 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
59 nvif_ioctl(object, "sclass vers %d count %d\n", 59 nvif_ioctl(object, "sclass vers %d count %d\n",
60 args->v0.version, args->v0.count); 60 args->v0.version, args->v0.count);
61 if (size != args->v0.count * sizeof(args->v0.oclass[0])) 61 if (size != args->v0.count * sizeof(args->v0.oclass[0]))
@@ -86,10 +86,10 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
86 struct nvkm_client *client = parent->client; 86 struct nvkm_client *client = parent->client;
87 struct nvkm_object *object = NULL; 87 struct nvkm_object *object = NULL;
88 struct nvkm_oclass oclass; 88 struct nvkm_oclass oclass;
89 int ret, i = 0; 89 int ret = -ENOSYS, i = 0;
90 90
91 nvif_ioctl(parent, "new size %d\n", size); 91 nvif_ioctl(parent, "new size %d\n", size);
92 if (nvif_unpack(args->v0, 0, 0, true)) { 92 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
93 nvif_ioctl(parent, "new vers %d handle %08x class %08x " 93 nvif_ioctl(parent, "new vers %d handle %08x class %08x "
94 "route %02x token %llx object %016llx\n", 94 "route %02x token %llx object %016llx\n",
95 args->v0.version, args->v0.handle, args->v0.oclass, 95 args->v0.version, args->v0.handle, args->v0.oclass,
@@ -147,10 +147,10 @@ nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
147 union { 147 union {
148 struct nvif_ioctl_del none; 148 struct nvif_ioctl_del none;
149 } *args = data; 149 } *args = data;
150 int ret; 150 int ret = -ENOSYS;
151 151
152 nvif_ioctl(object, "delete size %d\n", size); 152 nvif_ioctl(object, "delete size %d\n", size);
153 if (nvif_unvers(args->none)) { 153 if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
154 nvif_ioctl(object, "delete\n"); 154 nvif_ioctl(object, "delete\n");
155 nvkm_object_fini(object, false); 155 nvkm_object_fini(object, false);
156 nvkm_object_del(&object); 156 nvkm_object_del(&object);
@@ -165,10 +165,10 @@ nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
165 union { 165 union {
166 struct nvif_ioctl_mthd_v0 v0; 166 struct nvif_ioctl_mthd_v0 v0;
167 } *args = data; 167 } *args = data;
168 int ret; 168 int ret = -ENOSYS;
169 169
170 nvif_ioctl(object, "mthd size %d\n", size); 170 nvif_ioctl(object, "mthd size %d\n", size);
171 if (nvif_unpack(args->v0, 0, 0, true)) { 171 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
172 nvif_ioctl(object, "mthd vers %d mthd %02x\n", 172 nvif_ioctl(object, "mthd vers %d mthd %02x\n",
173 args->v0.version, args->v0.method); 173 args->v0.version, args->v0.method);
174 ret = nvkm_object_mthd(object, args->v0.method, data, size); 174 ret = nvkm_object_mthd(object, args->v0.method, data, size);
@@ -189,10 +189,10 @@ nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
189 u16 b16; 189 u16 b16;
190 u32 b32; 190 u32 b32;
191 } v; 191 } v;
192 int ret; 192 int ret = -ENOSYS;
193 193
194 nvif_ioctl(object, "rd size %d\n", size); 194 nvif_ioctl(object, "rd size %d\n", size);
195 if (nvif_unpack(args->v0, 0, 0, false)) { 195 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
196 nvif_ioctl(object, "rd vers %d size %d addr %016llx\n", 196 nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
197 args->v0.version, args->v0.size, args->v0.addr); 197 args->v0.version, args->v0.size, args->v0.addr);
198 switch (args->v0.size) { 198 switch (args->v0.size) {
@@ -223,10 +223,10 @@ nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
223 union { 223 union {
224 struct nvif_ioctl_wr_v0 v0; 224 struct nvif_ioctl_wr_v0 v0;
225 } *args = data; 225 } *args = data;
226 int ret; 226 int ret = -ENOSYS;
227 227
228 nvif_ioctl(object, "wr size %d\n", size); 228 nvif_ioctl(object, "wr size %d\n", size);
229 if (nvif_unpack(args->v0, 0, 0, false)) { 229 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
230 nvif_ioctl(object, 230 nvif_ioctl(object,
231 "wr vers %d size %d addr %016llx data %08x\n", 231 "wr vers %d size %d addr %016llx data %08x\n",
232 args->v0.version, args->v0.size, args->v0.addr, 232 args->v0.version, args->v0.size, args->v0.addr,
@@ -251,10 +251,10 @@ nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
251 union { 251 union {
252 struct nvif_ioctl_map_v0 v0; 252 struct nvif_ioctl_map_v0 v0;
253 } *args = data; 253 } *args = data;
254 int ret; 254 int ret = -ENOSYS;
255 255
256 nvif_ioctl(object, "map size %d\n", size); 256 nvif_ioctl(object, "map size %d\n", size);
257 if (nvif_unpack(args->v0, 0, 0, false)) { 257 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
258 nvif_ioctl(object, "map vers %d\n", args->v0.version); 258 nvif_ioctl(object, "map vers %d\n", args->v0.version);
259 ret = nvkm_object_map(object, &args->v0.handle, 259 ret = nvkm_object_map(object, &args->v0.handle,
260 &args->v0.length); 260 &args->v0.length);
@@ -269,10 +269,10 @@ nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
269 union { 269 union {
270 struct nvif_ioctl_unmap none; 270 struct nvif_ioctl_unmap none;
271 } *args = data; 271 } *args = data;
272 int ret; 272 int ret = -ENOSYS;
273 273
274 nvif_ioctl(object, "unmap size %d\n", size); 274 nvif_ioctl(object, "unmap size %d\n", size);
275 if (nvif_unvers(args->none)) { 275 if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
276 nvif_ioctl(object, "unmap\n"); 276 nvif_ioctl(object, "unmap\n");
277 } 277 }
278 278
@@ -286,10 +286,10 @@ nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
286 struct nvif_ioctl_ntfy_new_v0 v0; 286 struct nvif_ioctl_ntfy_new_v0 v0;
287 } *args = data; 287 } *args = data;
288 struct nvkm_event *event; 288 struct nvkm_event *event;
289 int ret; 289 int ret = -ENOSYS;
290 290
291 nvif_ioctl(object, "ntfy new size %d\n", size); 291 nvif_ioctl(object, "ntfy new size %d\n", size);
292 if (nvif_unpack(args->v0, 0, 0, true)) { 292 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
293 nvif_ioctl(object, "ntfy new vers %d event %02x\n", 293 nvif_ioctl(object, "ntfy new vers %d event %02x\n",
294 args->v0.version, args->v0.event); 294 args->v0.version, args->v0.event);
295 ret = nvkm_object_ntfy(object, args->v0.event, &event); 295 ret = nvkm_object_ntfy(object, args->v0.event, &event);
@@ -312,10 +312,10 @@ nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
312 union { 312 union {
313 struct nvif_ioctl_ntfy_del_v0 v0; 313 struct nvif_ioctl_ntfy_del_v0 v0;
314 } *args = data; 314 } *args = data;
315 int ret; 315 int ret = -ENOSYS;
316 316
317 nvif_ioctl(object, "ntfy del size %d\n", size); 317 nvif_ioctl(object, "ntfy del size %d\n", size);
318 if (nvif_unpack(args->v0, 0, 0, false)) { 318 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
319 nvif_ioctl(object, "ntfy del vers %d index %d\n", 319 nvif_ioctl(object, "ntfy del vers %d index %d\n",
320 args->v0.version, args->v0.index); 320 args->v0.version, args->v0.index);
321 ret = nvkm_client_notify_del(client, args->v0.index); 321 ret = nvkm_client_notify_del(client, args->v0.index);
@@ -331,10 +331,10 @@ nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
331 union { 331 union {
332 struct nvif_ioctl_ntfy_get_v0 v0; 332 struct nvif_ioctl_ntfy_get_v0 v0;
333 } *args = data; 333 } *args = data;
334 int ret; 334 int ret = -ENOSYS;
335 335
336 nvif_ioctl(object, "ntfy get size %d\n", size); 336 nvif_ioctl(object, "ntfy get size %d\n", size);
337 if (nvif_unpack(args->v0, 0, 0, false)) { 337 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
338 nvif_ioctl(object, "ntfy get vers %d index %d\n", 338 nvif_ioctl(object, "ntfy get vers %d index %d\n",
339 args->v0.version, args->v0.index); 339 args->v0.version, args->v0.index);
340 ret = nvkm_client_notify_get(client, args->v0.index); 340 ret = nvkm_client_notify_get(client, args->v0.index);
@@ -350,10 +350,10 @@ nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
350 union { 350 union {
351 struct nvif_ioctl_ntfy_put_v0 v0; 351 struct nvif_ioctl_ntfy_put_v0 v0;
352 } *args = data; 352 } *args = data;
353 int ret; 353 int ret = -ENOSYS;
354 354
355 nvif_ioctl(object, "ntfy put size %d\n", size); 355 nvif_ioctl(object, "ntfy put size %d\n", size);
356 if (nvif_unpack(args->v0, 0, 0, false)) { 356 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
357 nvif_ioctl(object, "ntfy put vers %d index %d\n", 357 nvif_ioctl(object, "ntfy put vers %d index %d\n",
358 args->v0.version, args->v0.index); 358 args->v0.version, args->v0.index);
359 ret = nvkm_client_notify_put(client, args->v0.index); 359 ret = nvkm_client_notify_put(client, args->v0.index);
@@ -421,12 +421,12 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
421 union { 421 union {
422 struct nvif_ioctl_v0 v0; 422 struct nvif_ioctl_v0 v0;
423 } *args = data; 423 } *args = data;
424 int ret; 424 int ret = -ENOSYS;
425 425
426 client->super = supervisor; 426 client->super = supervisor;
427 nvif_ioctl(object, "size %d\n", size); 427 nvif_ioctl(object, "size %d\n", size);
428 428
429 if (nvif_unpack(args->v0, 0, 0, true)) { 429 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
430 nvif_ioctl(object, 430 nvif_ioctl(object,
431 "vers %d type %02x object %016llx owner %02x\n", 431 "vers %d type %02x object %016llx owner %02x\n",
432 args->v0.version, args->v0.type, args->v0.object, 432 args->v0.version, args->v0.type, args->v0.object,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index c541a1c012dc..e2b944dce9b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -22,19 +22,65 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25#include <core/enum.h>
25 26
26#include <nvif/class.h> 27#include <nvif/class.h>
27 28
29static const struct nvkm_enum
30gk104_ce_launcherr_report[] = {
31 { 0x0, "NO_ERR" },
32 { 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
33 { 0x2, "INVALID_ARGUMENT" },
34 { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
35 { 0x4, "SRC_LINE_EXCEEDS_PITCH" },
36 { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
37 { 0x6, "DST_LINE_EXCEEDS_PITCH" },
38 { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
39 { 0x8, "BAD_SRC_PIXEL_COMP_REF" },
40 { 0x9, "INVALID_VALUE" },
41 { 0xa, "UNUSED_FIELD" },
42 { 0xb, "INVALID_OPERATION" },
43 {}
44};
45
46static void
47gk104_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
48{
49 struct nvkm_subdev *subdev = &ce->subdev;
50 struct nvkm_device *device = subdev->device;
51 u32 stat = nvkm_rd32(device, 0x104f14 + base);
52 const struct nvkm_enum *en =
53 nvkm_enum_find(gk104_ce_launcherr_report, stat & 0x0000000f);
54 nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
55 nvkm_wr32(device, 0x104f14 + base, 0x00000000);
56}
57
28void 58void
29gk104_ce_intr(struct nvkm_engine *ce) 59gk104_ce_intr(struct nvkm_engine *ce)
30{ 60{
31 const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000; 61 const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
32 struct nvkm_subdev *subdev = &ce->subdev; 62 struct nvkm_subdev *subdev = &ce->subdev;
33 struct nvkm_device *device = subdev->device; 63 struct nvkm_device *device = subdev->device;
34 u32 stat = nvkm_rd32(device, 0x104908 + base); 64 u32 mask = nvkm_rd32(device, 0x104904 + base);
35 if (stat) { 65 u32 intr = nvkm_rd32(device, 0x104908 + base) & mask;
36 nvkm_warn(subdev, "intr %08x\n", stat); 66 if (intr & 0x00000001) {
37 nvkm_wr32(device, 0x104908 + base, stat); 67 nvkm_warn(subdev, "BLOCKPIPE\n");
68 nvkm_wr32(device, 0x104908 + base, 0x00000001);
69 intr &= ~0x00000001;
70 }
71 if (intr & 0x00000002) {
72 nvkm_warn(subdev, "NONBLOCKPIPE\n");
73 nvkm_wr32(device, 0x104908 + base, 0x00000002);
74 intr &= ~0x00000002;
75 }
76 if (intr & 0x00000004) {
77 gk104_ce_intr_launcherr(ce, base);
78 nvkm_wr32(device, 0x104908 + base, 0x00000004);
79 intr &= ~0x00000004;
80 }
81 if (intr) {
82 nvkm_warn(subdev, "intr %08x\n", intr);
83 nvkm_wr32(device, 0x104908 + base, intr);
38 } 84 }
39} 85}
40 86
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index bbc9824af6e0..b1ba1c782a2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1388,7 +1388,7 @@ nvc1_chipset = {
1388 .mc = gf100_mc_new, 1388 .mc = gf100_mc_new,
1389 .mmu = gf100_mmu_new, 1389 .mmu = gf100_mmu_new,
1390 .mxm = nv50_mxm_new, 1390 .mxm = nv50_mxm_new,
1391 .pci = g94_pci_new, 1391 .pci = gf106_pci_new,
1392 .pmu = gf100_pmu_new, 1392 .pmu = gf100_pmu_new,
1393 .therm = gt215_therm_new, 1393 .therm = gt215_therm_new,
1394 .timer = nv41_timer_new, 1394 .timer = nv41_timer_new,
@@ -1423,7 +1423,7 @@ nvc3_chipset = {
1423 .mc = gf100_mc_new, 1423 .mc = gf100_mc_new,
1424 .mmu = gf100_mmu_new, 1424 .mmu = gf100_mmu_new,
1425 .mxm = nv50_mxm_new, 1425 .mxm = nv50_mxm_new,
1426 .pci = g94_pci_new, 1426 .pci = gf106_pci_new,
1427 .pmu = gf100_pmu_new, 1427 .pmu = gf100_pmu_new,
1428 .therm = gt215_therm_new, 1428 .therm = gt215_therm_new,
1429 .timer = nv41_timer_new, 1429 .timer = nv41_timer_new,
@@ -1566,7 +1566,7 @@ nvcf_chipset = {
1566 .mc = gf100_mc_new, 1566 .mc = gf100_mc_new,
1567 .mmu = gf100_mmu_new, 1567 .mmu = gf100_mmu_new,
1568 .mxm = nv50_mxm_new, 1568 .mxm = nv50_mxm_new,
1569 .pci = g94_pci_new, 1569 .pci = gf106_pci_new,
1570 .pmu = gf100_pmu_new, 1570 .pmu = gf100_pmu_new,
1571 .therm = gt215_therm_new, 1571 .therm = gt215_therm_new,
1572 .timer = nv41_timer_new, 1572 .timer = nv41_timer_new,
@@ -1601,7 +1601,7 @@ nvd7_chipset = {
1601 .mc = gf100_mc_new, 1601 .mc = gf100_mc_new,
1602 .mmu = gf100_mmu_new, 1602 .mmu = gf100_mmu_new,
1603 .mxm = nv50_mxm_new, 1603 .mxm = nv50_mxm_new,
1604 .pci = g94_pci_new, 1604 .pci = gf106_pci_new,
1605 .therm = gf119_therm_new, 1605 .therm = gf119_therm_new,
1606 .timer = nv41_timer_new, 1606 .timer = nv41_timer_new,
1607 .ce[0] = gf100_ce_new, 1607 .ce[0] = gf100_ce_new,
@@ -1634,7 +1634,7 @@ nvd9_chipset = {
1634 .mc = gf100_mc_new, 1634 .mc = gf100_mc_new,
1635 .mmu = gf100_mmu_new, 1635 .mmu = gf100_mmu_new,
1636 .mxm = nv50_mxm_new, 1636 .mxm = nv50_mxm_new,
1637 .pci = g94_pci_new, 1637 .pci = gf106_pci_new,
1638 .pmu = gf119_pmu_new, 1638 .pmu = gf119_pmu_new,
1639 .therm = gf119_therm_new, 1639 .therm = gf119_therm_new,
1640 .timer = nv41_timer_new, 1640 .timer = nv41_timer_new,
@@ -1669,7 +1669,7 @@ nve4_chipset = {
1669 .mc = gf100_mc_new, 1669 .mc = gf100_mc_new,
1670 .mmu = gf100_mmu_new, 1670 .mmu = gf100_mmu_new,
1671 .mxm = nv50_mxm_new, 1671 .mxm = nv50_mxm_new,
1672 .pci = g94_pci_new, 1672 .pci = gk104_pci_new,
1673 .pmu = gk104_pmu_new, 1673 .pmu = gk104_pmu_new,
1674 .therm = gf119_therm_new, 1674 .therm = gf119_therm_new,
1675 .timer = nv41_timer_new, 1675 .timer = nv41_timer_new,
@@ -1706,7 +1706,7 @@ nve6_chipset = {
1706 .mc = gf100_mc_new, 1706 .mc = gf100_mc_new,
1707 .mmu = gf100_mmu_new, 1707 .mmu = gf100_mmu_new,
1708 .mxm = nv50_mxm_new, 1708 .mxm = nv50_mxm_new,
1709 .pci = g94_pci_new, 1709 .pci = gk104_pci_new,
1710 .pmu = gk104_pmu_new, 1710 .pmu = gk104_pmu_new,
1711 .therm = gf119_therm_new, 1711 .therm = gf119_therm_new,
1712 .timer = nv41_timer_new, 1712 .timer = nv41_timer_new,
@@ -1743,7 +1743,7 @@ nve7_chipset = {
1743 .mc = gf100_mc_new, 1743 .mc = gf100_mc_new,
1744 .mmu = gf100_mmu_new, 1744 .mmu = gf100_mmu_new,
1745 .mxm = nv50_mxm_new, 1745 .mxm = nv50_mxm_new,
1746 .pci = g94_pci_new, 1746 .pci = gk104_pci_new,
1747 .pmu = gk104_pmu_new, 1747 .pmu = gk104_pmu_new,
1748 .therm = gf119_therm_new, 1748 .therm = gf119_therm_new,
1749 .timer = nv41_timer_new, 1749 .timer = nv41_timer_new,
@@ -1804,7 +1804,7 @@ nvf0_chipset = {
1804 .mc = gf100_mc_new, 1804 .mc = gf100_mc_new,
1805 .mmu = gf100_mmu_new, 1805 .mmu = gf100_mmu_new,
1806 .mxm = nv50_mxm_new, 1806 .mxm = nv50_mxm_new,
1807 .pci = g94_pci_new, 1807 .pci = gk104_pci_new,
1808 .pmu = gk110_pmu_new, 1808 .pmu = gk110_pmu_new,
1809 .therm = gf119_therm_new, 1809 .therm = gf119_therm_new,
1810 .timer = nv41_timer_new, 1810 .timer = nv41_timer_new,
@@ -1840,7 +1840,7 @@ nvf1_chipset = {
1840 .mc = gf100_mc_new, 1840 .mc = gf100_mc_new,
1841 .mmu = gf100_mmu_new, 1841 .mmu = gf100_mmu_new,
1842 .mxm = nv50_mxm_new, 1842 .mxm = nv50_mxm_new,
1843 .pci = g94_pci_new, 1843 .pci = gk104_pci_new,
1844 .pmu = gk110_pmu_new, 1844 .pmu = gk110_pmu_new,
1845 .therm = gf119_therm_new, 1845 .therm = gf119_therm_new,
1846 .timer = nv41_timer_new, 1846 .timer = nv41_timer_new,
@@ -1876,7 +1876,7 @@ nv106_chipset = {
1876 .mc = gk20a_mc_new, 1876 .mc = gk20a_mc_new,
1877 .mmu = gf100_mmu_new, 1877 .mmu = gf100_mmu_new,
1878 .mxm = nv50_mxm_new, 1878 .mxm = nv50_mxm_new,
1879 .pci = g94_pci_new, 1879 .pci = gk104_pci_new,
1880 .pmu = gk208_pmu_new, 1880 .pmu = gk208_pmu_new,
1881 .therm = gf119_therm_new, 1881 .therm = gf119_therm_new,
1882 .timer = nv41_timer_new, 1882 .timer = nv41_timer_new,
@@ -1912,7 +1912,7 @@ nv108_chipset = {
1912 .mc = gk20a_mc_new, 1912 .mc = gk20a_mc_new,
1913 .mmu = gf100_mmu_new, 1913 .mmu = gf100_mmu_new,
1914 .mxm = nv50_mxm_new, 1914 .mxm = nv50_mxm_new,
1915 .pci = g94_pci_new, 1915 .pci = gk104_pci_new,
1916 .pmu = gk208_pmu_new, 1916 .pmu = gk208_pmu_new,
1917 .therm = gf119_therm_new, 1917 .therm = gf119_therm_new,
1918 .timer = nv41_timer_new, 1918 .timer = nv41_timer_new,
@@ -1948,7 +1948,7 @@ nv117_chipset = {
1948 .mc = gk20a_mc_new, 1948 .mc = gk20a_mc_new,
1949 .mmu = gf100_mmu_new, 1949 .mmu = gf100_mmu_new,
1950 .mxm = nv50_mxm_new, 1950 .mxm = nv50_mxm_new,
1951 .pci = g94_pci_new, 1951 .pci = gk104_pci_new,
1952 .pmu = gm107_pmu_new, 1952 .pmu = gm107_pmu_new,
1953 .therm = gm107_therm_new, 1953 .therm = gm107_therm_new,
1954 .timer = gk20a_timer_new, 1954 .timer = gk20a_timer_new,
@@ -1973,13 +1973,13 @@ nv124_chipset = {
1973 .fuse = gm107_fuse_new, 1973 .fuse = gm107_fuse_new,
1974 .gpio = gk104_gpio_new, 1974 .gpio = gk104_gpio_new,
1975 .i2c = gm204_i2c_new, 1975 .i2c = gm204_i2c_new,
1976 .ibus = gk104_ibus_new, 1976 .ibus = gm204_ibus_new,
1977 .imem = nv50_instmem_new, 1977 .imem = nv50_instmem_new,
1978 .ltc = gm107_ltc_new, 1978 .ltc = gm204_ltc_new,
1979 .mc = gk20a_mc_new, 1979 .mc = gk20a_mc_new,
1980 .mmu = gf100_mmu_new, 1980 .mmu = gf100_mmu_new,
1981 .mxm = nv50_mxm_new, 1981 .mxm = nv50_mxm_new,
1982 .pci = g94_pci_new, 1982 .pci = gk104_pci_new,
1983 .pmu = gm107_pmu_new, 1983 .pmu = gm107_pmu_new,
1984 .timer = gk20a_timer_new, 1984 .timer = gk20a_timer_new,
1985 .volt = gk104_volt_new, 1985 .volt = gk104_volt_new,
@@ -2004,13 +2004,13 @@ nv126_chipset = {
2004 .fuse = gm107_fuse_new, 2004 .fuse = gm107_fuse_new,
2005 .gpio = gk104_gpio_new, 2005 .gpio = gk104_gpio_new,
2006 .i2c = gm204_i2c_new, 2006 .i2c = gm204_i2c_new,
2007 .ibus = gk104_ibus_new, 2007 .ibus = gm204_ibus_new,
2008 .imem = nv50_instmem_new, 2008 .imem = nv50_instmem_new,
2009 .ltc = gm107_ltc_new, 2009 .ltc = gm204_ltc_new,
2010 .mc = gk20a_mc_new, 2010 .mc = gk20a_mc_new,
2011 .mmu = gf100_mmu_new, 2011 .mmu = gf100_mmu_new,
2012 .mxm = nv50_mxm_new, 2012 .mxm = nv50_mxm_new,
2013 .pci = g94_pci_new, 2013 .pci = gk104_pci_new,
2014 .pmu = gm107_pmu_new, 2014 .pmu = gm107_pmu_new,
2015 .timer = gk20a_timer_new, 2015 .timer = gk20a_timer_new,
2016 .volt = gk104_volt_new, 2016 .volt = gk104_volt_new,
@@ -2033,7 +2033,7 @@ nv12b_chipset = {
2033 .fuse = gm107_fuse_new, 2033 .fuse = gm107_fuse_new,
2034 .ibus = gk20a_ibus_new, 2034 .ibus = gk20a_ibus_new,
2035 .imem = gk20a_instmem_new, 2035 .imem = gk20a_instmem_new,
2036 .ltc = gm107_ltc_new, 2036 .ltc = gm204_ltc_new,
2037 .mc = gk20a_mc_new, 2037 .mc = gk20a_mc_new,
2038 .mmu = gf100_mmu_new, 2038 .mmu = gf100_mmu_new,
2039 .timer = gk20a_timer_new, 2039 .timer = gk20a_timer_new,
@@ -2494,7 +2494,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2494 device->pri = ioremap(mmio_base, mmio_size); 2494 device->pri = ioremap(mmio_base, mmio_size);
2495 if (!device->pri) { 2495 if (!device->pri) {
2496 nvdev_error(device, "unable to map PRI\n"); 2496 nvdev_error(device, "unable to map PRI\n");
2497 return -ENOMEM; 2497 ret = -ENOMEM;
2498 goto done;
2498 } 2499 }
2499 } 2500 }
2500 2501
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index cf8bc068e9b7..b0ece71aefde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -27,6 +27,7 @@
27#include <subdev/clk.h> 27#include <subdev/clk.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/if0001.h>
30#include <nvif/ioctl.h> 31#include <nvif/ioctl.h>
31#include <nvif/unpack.h> 32#include <nvif/unpack.h>
32 33
@@ -37,10 +38,10 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
37 struct nvif_control_pstate_info_v0 v0; 38 struct nvif_control_pstate_info_v0 v0;
38 } *args = data; 39 } *args = data;
39 struct nvkm_clk *clk = ctrl->device->clk; 40 struct nvkm_clk *clk = ctrl->device->clk;
40 int ret; 41 int ret = -ENOSYS;
41 42
42 nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size); 43 nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, false)) { 44 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
44 nvif_ioctl(&ctrl->object, "control pstate info vers %d\n", 45 nvif_ioctl(&ctrl->object, "control pstate info vers %d\n",
45 args->v0.version); 46 args->v0.version);
46 } else 47 } else
@@ -75,10 +76,10 @@ nvkm_control_mthd_pstate_attr(struct nvkm_control *ctrl, void *data, u32 size)
75 struct nvkm_cstate *cstate; 76 struct nvkm_cstate *cstate;
76 int i = 0, j = -1; 77 int i = 0, j = -1;
77 u32 lo, hi; 78 u32 lo, hi;
78 int ret; 79 int ret = -ENOSYS;
79 80
80 nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size); 81 nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size);
81 if (nvif_unpack(args->v0, 0, 0, false)) { 82 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
82 nvif_ioctl(&ctrl->object, 83 nvif_ioctl(&ctrl->object,
83 "control pstate attr vers %d state %d index %d\n", 84 "control pstate attr vers %d state %d index %d\n",
84 args->v0.version, args->v0.state, args->v0.index); 85 args->v0.version, args->v0.state, args->v0.index);
@@ -143,10 +144,10 @@ nvkm_control_mthd_pstate_user(struct nvkm_control *ctrl, void *data, u32 size)
143 struct nvif_control_pstate_user_v0 v0; 144 struct nvif_control_pstate_user_v0 v0;
144 } *args = data; 145 } *args = data;
145 struct nvkm_clk *clk = ctrl->device->clk; 146 struct nvkm_clk *clk = ctrl->device->clk;
146 int ret; 147 int ret = -ENOSYS;
147 148
148 nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size); 149 nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size);
149 if (nvif_unpack(args->v0, 0, 0, false)) { 150 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
150 nvif_ioctl(&ctrl->object, 151 nvif_ioctl(&ctrl->object,
151 "control pstate user vers %d ustate %d pwrsrc %d\n", 152 "control pstate user vers %d ustate %d pwrsrc %d\n",
152 args->v0.version, args->v0.ustate, args->v0.pwrsrc); 153 args->v0.version, args->v0.ustate, args->v0.pwrsrc);
@@ -204,7 +205,7 @@ nvkm_control_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
204 205
205const struct nvkm_device_oclass 206const struct nvkm_device_oclass
206nvkm_control_oclass = { 207nvkm_control_oclass = {
207 .base.oclass = NVIF_IOCTL_NEW_V0_CONTROL, 208 .base.oclass = NVIF_CLASS_CONTROL,
208 .base.minver = -1, 209 .base.minver = -1,
209 .base.maxver = -1, 210 .base.maxver = -1,
210 .ctor = nvkm_control_new, 211 .ctor = nvkm_control_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index caf22b589edc..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -259,12 +259,6 @@ nvkm_device_pci_10de_0df4[] = {
259}; 259};
260 260
261static const struct nvkm_device_pci_vendor 261static const struct nvkm_device_pci_vendor
262nvkm_device_pci_10de_0fcd[] = {
263 { 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */
264 {}
265};
266
267static const struct nvkm_device_pci_vendor
268nvkm_device_pci_10de_0fd2[] = { 262nvkm_device_pci_10de_0fd2[] = {
269 { 0x1028, 0x0595, "GeForce GT 640M LE" }, 263 { 0x1028, 0x0595, "GeForce GT 640M LE" },
270 { 0x1028, 0x05b2, "GeForce GT 640M LE" }, 264 { 0x1028, 0x05b2, "GeForce GT 640M LE" },
@@ -279,12 +273,6 @@ nvkm_device_pci_10de_0fe3[] = {
279}; 273};
280 274
281static const struct nvkm_device_pci_vendor 275static const struct nvkm_device_pci_vendor
282nvkm_device_pci_10de_0fe4[] = {
283 { 0x144d, 0xc740, NULL, { .War00C800_0 = true } },
284 {}
285};
286
287static const struct nvkm_device_pci_vendor
288nvkm_device_pci_10de_104b[] = { 276nvkm_device_pci_10de_104b[] = {
289 { 0x1043, 0x844c, "GeForce GT 625" }, 277 { 0x1043, 0x844c, "GeForce GT 625" },
290 { 0x1043, 0x846b, "GeForce GT 625" }, 278 { 0x1043, 0x846b, "GeForce GT 625" },
@@ -690,13 +678,6 @@ nvkm_device_pci_10de_1189[] = {
690static const struct nvkm_device_pci_vendor 678static const struct nvkm_device_pci_vendor
691nvkm_device_pci_10de_1199[] = { 679nvkm_device_pci_10de_1199[] = {
692 { 0x1458, 0xd001, "GeForce GTX 760" }, 680 { 0x1458, 0xd001, "GeForce GTX 760" },
693 { 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */
694 {}
695};
696
697static const struct nvkm_device_pci_vendor
698nvkm_device_pci_10de_11e0[] = {
699 { 0x1558, 0x5106, NULL, { .War00C800_0 = true } },
700 {} 681 {}
701}; 682};
702 683
@@ -707,14 +688,6 @@ nvkm_device_pci_10de_11e3[] = {
707}; 688};
708 689
709static const struct nvkm_device_pci_vendor 690static const struct nvkm_device_pci_vendor
710nvkm_device_pci_10de_11fc[] = {
711 { 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
712 { 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
713 { 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
714 {}
715};
716
717static const struct nvkm_device_pci_vendor
718nvkm_device_pci_10de_1247[] = { 691nvkm_device_pci_10de_1247[] = {
719 { 0x1043, 0x212a, "GeForce GT 635M" }, 692 { 0x1043, 0x212a, "GeForce GT 635M" },
720 { 0x1043, 0x212b, "GeForce GT 635M" }, 693 { 0x1043, 0x212b, "GeForce GT 635M" },
@@ -1368,7 +1341,7 @@ nvkm_device_pci_10de[] = {
1368 { 0x0fc6, "GeForce GTX 650" }, 1341 { 0x0fc6, "GeForce GTX 650" },
1369 { 0x0fc8, "GeForce GT 740" }, 1342 { 0x0fc8, "GeForce GT 740" },
1370 { 0x0fc9, "GeForce GT 730" }, 1343 { 0x0fc9, "GeForce GT 730" },
1371 { 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd }, 1344 { 0x0fcd, "GeForce GT 755M" },
1372 { 0x0fce, "GeForce GT 640M LE" }, 1345 { 0x0fce, "GeForce GT 640M LE" },
1373 { 0x0fd1, "GeForce GT 650M" }, 1346 { 0x0fd1, "GeForce GT 650M" },
1374 { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 }, 1347 { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
@@ -1382,7 +1355,7 @@ nvkm_device_pci_10de[] = {
1382 { 0x0fe1, "GeForce GT 730M" }, 1355 { 0x0fe1, "GeForce GT 730M" },
1383 { 0x0fe2, "GeForce GT 745M" }, 1356 { 0x0fe2, "GeForce GT 745M" },
1384 { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, 1357 { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
1385 { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 }, 1358 { 0x0fe4, "GeForce GT 750M" },
1386 { 0x0fe9, "GeForce GT 750M" }, 1359 { 0x0fe9, "GeForce GT 750M" },
1387 { 0x0fea, "GeForce GT 755M" }, 1360 { 0x0fea, "GeForce GT 755M" },
1388 { 0x0fec, "GeForce 710A" }, 1361 { 0x0fec, "GeForce 710A" },
@@ -1497,12 +1470,12 @@ nvkm_device_pci_10de[] = {
1497 { 0x11c6, "GeForce GTX 650 Ti" }, 1470 { 0x11c6, "GeForce GTX 650 Ti" },
1498 { 0x11c8, "GeForce GTX 650" }, 1471 { 0x11c8, "GeForce GTX 650" },
1499 { 0x11cb, "GeForce GT 740" }, 1472 { 0x11cb, "GeForce GT 740" },
1500 { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 }, 1473 { 0x11e0, "GeForce GTX 770M" },
1501 { 0x11e1, "GeForce GTX 765M" }, 1474 { 0x11e1, "GeForce GTX 765M" },
1502 { 0x11e2, "GeForce GTX 765M" }, 1475 { 0x11e2, "GeForce GTX 765M" },
1503 { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, 1476 { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
1504 { 0x11fa, "Quadro K4000" }, 1477 { 0x11fa, "Quadro K4000" },
1505 { 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc }, 1478 { 0x11fc, "Quadro K2100M" },
1506 { 0x1200, "GeForce GTX 560 Ti" }, 1479 { 0x1200, "GeForce GTX 560 Ti" },
1507 { 0x1201, "GeForce GTX 560" }, 1480 { 0x1201, "GeForce GTX 560" },
1508 { 0x1203, "GeForce GTX 460 SE v2" }, 1481 { 0x1203, "GeForce GTX 460 SE v2" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 1ae48f27029d..137066426ed7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -31,6 +31,7 @@
31#include <subdev/timer.h> 31#include <subdev/timer.h>
32 32
33#include <nvif/class.h> 33#include <nvif/class.h>
34#include <nvif/cl0080.h>
34#include <nvif/unpack.h> 35#include <nvif/unpack.h>
35 36
36struct nvkm_udevice { 37struct nvkm_udevice {
@@ -48,10 +49,10 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
48 union { 49 union {
49 struct nv_device_info_v0 v0; 50 struct nv_device_info_v0 v0;
50 } *args = data; 51 } *args = data;
51 int ret; 52 int ret = -ENOSYS;
52 53
53 nvif_ioctl(object, "device info size %d\n", size); 54 nvif_ioctl(object, "device info size %d\n", size);
54 if (nvif_unpack(args->v0, 0, 0, false)) { 55 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
55 nvif_ioctl(object, "device info vers %d\n", args->v0.version); 56 nvif_ioctl(object, "device info vers %d\n", args->v0.version);
56 } else 57 } else
57 return ret; 58 return ret;
@@ -123,13 +124,16 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
123static int 124static int
124nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size) 125nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
125{ 126{
127 struct nvkm_object *object = &udev->object;
126 struct nvkm_device *device = udev->device; 128 struct nvkm_device *device = udev->device;
127 union { 129 union {
128 struct nv_device_time_v0 v0; 130 struct nv_device_time_v0 v0;
129 } *args = data; 131 } *args = data;
130 int ret; 132 int ret = -ENOSYS;
131 133
132 if (nvif_unpack(args->v0, 0, 0, false)) { 134 nvif_ioctl(object, "device time size %d\n", size);
135 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
136 nvif_ioctl(object, "device time vers %d\n", args->v0.version);
133 args->v0.time = nvkm_timer_read(device->timer); 137 args->v0.time = nvkm_timer_read(device->timer);
134 } 138 }
135 139
@@ -140,6 +144,7 @@ static int
140nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 144nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
141{ 145{
142 struct nvkm_udevice *udev = nvkm_udevice(object); 146 struct nvkm_udevice *udev = nvkm_udevice(object);
147 nvif_ioctl(object, "device mthd %08x\n", mthd);
143 switch (mthd) { 148 switch (mthd) {
144 case NV_DEVICE_V0_INFO: 149 case NV_DEVICE_V0_INFO:
145 return nvkm_udevice_info(udev, data, size); 150 return nvkm_udevice_info(udev, data, size);
@@ -331,10 +336,10 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
331 struct nvkm_object *parent = &client->object; 336 struct nvkm_object *parent = &client->object;
332 const struct nvkm_object_func *func; 337 const struct nvkm_object_func *func;
333 struct nvkm_udevice *udev; 338 struct nvkm_udevice *udev;
334 int ret; 339 int ret = -ENOSYS;
335 340
336 nvif_ioctl(parent, "create device size %d\n", size); 341 nvif_ioctl(parent, "create device size %d\n", size);
337 if (nvif_unpack(args->v0, 0, 0, false)) { 342 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
338 nvif_ioctl(parent, "create device v%d device %016llx\n", 343 nvif_ioctl(parent, "create device v%d device %016llx\n",
339 args->v0.version, args->v0.device); 344 args->v0.version, args->v0.device);
340 } else 345 } else
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 44b67719f64d..785fa76d0fbf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -32,6 +32,7 @@
32#include <subdev/bios/dcb.h> 32#include <subdev/bios/dcb.h>
33 33
34#include <nvif/class.h> 34#include <nvif/class.h>
35#include <nvif/cl0046.h>
35#include <nvif/event.h> 36#include <nvif/event.h>
36#include <nvif/unpack.h> 37#include <nvif/unpack.h>
37 38
@@ -58,9 +59,9 @@ nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
58 union { 59 union {
59 struct nvif_notify_head_req_v0 v0; 60 struct nvif_notify_head_req_v0 v0;
60 } *req = data; 61 } *req = data;
61 int ret; 62 int ret = -ENOSYS;
62 63
63 if (nvif_unpack(req->v0, 0, 0, false)) { 64 if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
64 notify->size = sizeof(struct nvif_notify_head_rep_v0); 65 notify->size = sizeof(struct nvif_notify_head_rep_v0);
65 if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) { 66 if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
66 notify->types = 1; 67 notify->types = 1;
@@ -96,9 +97,9 @@ nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
96 struct nvif_notify_conn_req_v0 v0; 97 struct nvif_notify_conn_req_v0 v0;
97 } *req = data; 98 } *req = data;
98 struct nvkm_output *outp; 99 struct nvkm_output *outp;
99 int ret; 100 int ret = -ENOSYS;
100 101
101 if (nvif_unpack(req->v0, 0, 0, false)) { 102 if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
102 notify->size = sizeof(struct nvif_notify_conn_rep_v0); 103 notify->size = sizeof(struct nvif_notify_conn_rep_v0);
103 list_for_each_entry(outp, &disp->outp, head) { 104 list_for_each_entry(outp, &disp->outp, head) {
104 if (ret = -ENXIO, outp->conn->index == req->v0.conn) { 105 if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
index 1fd89edefc26..83f152300ec0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -27,6 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl507c.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32int 33int
@@ -41,11 +42,11 @@ nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
41 } *args = data; 42 } *args = data;
42 struct nvkm_object *parent = oclass->parent; 43 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp; 44 struct nv50_disp *disp = root->disp;
44 int head, ret; 45 int head, ret = -ENOSYS;
45 u64 push; 46 u64 push;
46 47
47 nvif_ioctl(parent, "create disp base channel dma size %d\n", size); 48 nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) { 49 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
49 nvif_ioctl(parent, "create disp base channel dma vers %d " 50 nvif_ioctl(parent, "create disp base channel dma vers %d "
50 "pushbuf %016llx head %d\n", 51 "pushbuf %016llx head %d\n",
51 args->v0.version, args->v0.pushbuf, args->v0.head); 52 args->v0.version, args->v0.pushbuf, args->v0.head);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 01803c0679b6..dd2953bc9264 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -28,7 +28,7 @@
28#include <core/ramht.h> 28#include <core/ramht.h>
29#include <engine/dma.h> 29#include <engine/dma.h>
30 30
31#include <nvif/class.h> 31#include <nvif/cl507d.h>
32#include <nvif/event.h> 32#include <nvif/event.h>
33#include <nvif/unpack.h> 33#include <nvif/unpack.h>
34 34
@@ -134,9 +134,9 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
134 union { 134 union {
135 struct nvif_notify_uevent_req none; 135 struct nvif_notify_uevent_req none;
136 } *args = data; 136 } *args = data;
137 int ret; 137 int ret = -ENOSYS;
138 138
139 if (nvif_unvers(args->none)) { 139 if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
140 notify->size = sizeof(struct nvif_notify_uevent_rep); 140 notify->size = sizeof(struct nvif_notify_uevent_rep);
141 notify->types = 1; 141 notify->types = 1;
142 notify->index = chan->chid; 142 notify->index = chan->chid;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index db4a9b3e0e09..b547c8b833ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -28,6 +28,7 @@
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29 29
30#include <nvif/class.h> 30#include <nvif/class.h>
31#include <nvif/cl507d.h>
31#include <nvif/unpack.h> 32#include <nvif/unpack.h>
32 33
33int 34int
@@ -42,10 +43,10 @@ nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
42 } *args = data; 43 } *args = data;
43 struct nvkm_object *parent = oclass->parent; 44 struct nvkm_object *parent = oclass->parent;
44 u64 push; 45 u64 push;
45 int ret; 46 int ret = -ENOSYS;
46 47
47 nvif_ioctl(parent, "create disp core channel dma size %d\n", size); 48 nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) { 49 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
49 nvif_ioctl(parent, "create disp core channel dma vers %d " 50 nvif_ioctl(parent, "create disp core channel dma vers %d "
50 "pushbuf %016llx\n", 51 "pushbuf %016llx\n",
51 args->v0.version, args->v0.pushbuf); 52 args->v0.version, args->v0.pushbuf);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index 225858e62cf6..8b1320499a0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -27,6 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl507a.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32int 33int
@@ -41,10 +42,10 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
41 } *args = data; 42 } *args = data;
42 struct nvkm_object *parent = oclass->parent; 43 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp; 44 struct nv50_disp *disp = root->disp;
44 int head, ret; 45 int head, ret = -ENOSYS;
45 46
46 nvif_ioctl(parent, "create disp cursor size %d\n", size); 47 nvif_ioctl(parent, "create disp cursor size %d\n", size);
47 if (nvif_unpack(args->v0, 0, 0, false)) { 48 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
48 nvif_ioctl(parent, "create disp cursor vers %d head %d\n", 49 nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
49 args->v0.version, args->v0.head); 50 args->v0.version, args->v0.head);
50 if (args->v0.head > disp->base.head.nr) 51 if (args->v0.head > disp->base.head.nr)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 9bfa9e7dc161..c9b78b8f9c87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -27,7 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29 29
30#include <nvif/class.h> 30#include <nvif/cl5070.h>
31#include <nvif/unpack.h> 31#include <nvif/unpack.h>
32 32
33int 33int
@@ -39,10 +39,10 @@ nv50_dac_power(NV50_DISP_MTHD_V1)
39 struct nv50_disp_dac_pwr_v0 v0; 39 struct nv50_disp_dac_pwr_v0 v0;
40 } *args = data; 40 } *args = data;
41 u32 stat; 41 u32 stat;
42 int ret; 42 int ret = -ENOSYS;
43 43
44 nvif_ioctl(object, "disp dac pwr size %d\n", size); 44 nvif_ioctl(object, "disp dac pwr size %d\n", size);
45 if (nvif_unpack(args->v0, 0, 0, false)) { 45 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
46 nvif_ioctl(object, "disp dac pwr vers %d state %d data %d " 46 nvif_ioctl(object, "disp dac pwr vers %d state %d data %d "
47 "vsync %d hsync %d\n", 47 "vsync %d hsync %d\n",
48 args->v0.version, args->v0.state, args->v0.data, 48 args->v0.version, args->v0.state, args->v0.data,
@@ -76,10 +76,10 @@ nv50_dac_sense(NV50_DISP_MTHD_V1)
76 } *args = data; 76 } *args = data;
77 const u32 doff = outp->or * 0x800; 77 const u32 doff = outp->or * 0x800;
78 u32 loadval; 78 u32 loadval;
79 int ret; 79 int ret = -ENOSYS;
80 80
81 nvif_ioctl(object, "disp dac load size %d\n", size); 81 nvif_ioctl(object, "disp dac load size %d\n", size);
82 if (nvif_unpack(args->v0, 0, 0, false)) { 82 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
83 nvif_ioctl(object, "disp dac load vers %d data %08x\n", 83 nvif_ioctl(object, "disp dac load vers %d data %08x\n",
84 args->v0.version, args->v0.data); 84 args->v0.version, args->v0.data);
85 if (args->v0.data & 0xfff00000) 85 if (args->v0.data & 0xfff00000)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 186fd3ac78f6..f0314664349c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -158,7 +158,7 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
158 switch (outp->info.type) { 158 switch (outp->info.type) {
159 case DCB_OUTPUT_TMDS: 159 case DCB_OUTPUT_TMDS:
160 *conf = (ctrl & 0x00000f00) >> 8; 160 *conf = (ctrl & 0x00000f00) >> 8;
161 if (pclk >= 165000) 161 if (*conf == 5)
162 *conf |= 0x0100; 162 *conf |= 0x0100;
163 break; 163 break;
164 case DCB_OUTPUT_LVDS: 164 case DCB_OUTPUT_LVDS:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
index af99efbd63f7..da6129b2b78f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -29,7 +29,7 @@
29#include <subdev/bios/dcb.h> 29#include <subdev/bios/dcb.h>
30#include <subdev/timer.h> 30#include <subdev/timer.h>
31 31
32#include <nvif/class.h> 32#include <nvif/cl5070.h>
33#include <nvif/unpack.h> 33#include <nvif/unpack.h>
34 34
35int 35int
@@ -41,10 +41,10 @@ gf119_hda_eld(NV50_DISP_MTHD_V1)
41 } *args = data; 41 } *args = data;
42 const u32 soff = outp->or * 0x030; 42 const u32 soff = outp->or * 0x030;
43 const u32 hoff = head * 0x800; 43 const u32 hoff = head * 0x800;
44 int ret, i; 44 int ret = -ENOSYS, i;
45 45
46 nvif_ioctl(object, "disp sor hda eld size %d\n", size); 46 nvif_ioctl(object, "disp sor hda eld size %d\n", size);
47 if (nvif_unpack(args->v0, 0, 0, true)) { 47 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
48 nvif_ioctl(object, "disp sor hda eld vers %d\n", 48 nvif_ioctl(object, "disp sor hda eld vers %d\n",
49 args->v0.version); 49 args->v0.version);
50 if (size > 0x60) 50 if (size > 0x60)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index c1590b746f13..6f0436df0219 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -27,7 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29 29
30#include <nvif/class.h> 30#include <nvif/cl5070.h>
31#include <nvif/unpack.h> 31#include <nvif/unpack.h>
32 32
33int 33int
@@ -38,10 +38,10 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
38 struct nv50_disp_sor_hda_eld_v0 v0; 38 struct nv50_disp_sor_hda_eld_v0 v0;
39 } *args = data; 39 } *args = data;
40 const u32 soff = outp->or * 0x800; 40 const u32 soff = outp->or * 0x800;
41 int ret, i; 41 int ret = -ENOSYS, i;
42 42
43 nvif_ioctl(object, "disp sor hda eld size %d\n", size); 43 nvif_ioctl(object, "disp sor hda eld size %d\n", size);
44 if (nvif_unpack(args->v0, 0, 0, true)) { 44 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
45 nvif_ioctl(object, "disp sor hda eld vers %d\n", 45 nvif_ioctl(object, "disp sor hda eld vers %d\n",
46 args->v0.version); 46 args->v0.version);
47 if (size > 0x60) 47 if (size > 0x60)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
index ee9e800a8f06..1c4256e8cbd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
@@ -25,7 +25,7 @@
25 25
26#include <core/client.h> 26#include <core/client.h>
27 27
28#include <nvif/class.h> 28#include <nvif/cl5070.h>
29#include <nvif/unpack.h> 29#include <nvif/unpack.h>
30 30
31int 31int
@@ -37,10 +37,10 @@ g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
37 struct nv50_disp_sor_hdmi_pwr_v0 v0; 37 struct nv50_disp_sor_hdmi_pwr_v0 v0;
38 } *args = data; 38 } *args = data;
39 u32 ctrl; 39 u32 ctrl;
40 int ret; 40 int ret = -ENOSYS;
41 41
42 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 42 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, false)) { 43 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
44 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 44 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
45 "max_ac_packet %d rekey %d\n", 45 "max_ac_packet %d rekey %d\n",
46 args->v0.version, args->v0.state, 46 args->v0.version, args->v0.state,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
index b5af025d3b04..632f02da1382 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
@@ -25,7 +25,7 @@
25 25
26#include <core/client.h> 26#include <core/client.h>
27 27
28#include <nvif/class.h> 28#include <nvif/cl5070.h>
29#include <nvif/unpack.h> 29#include <nvif/unpack.h>
30 30
31int 31int
@@ -37,10 +37,10 @@ gf119_hdmi_ctrl(NV50_DISP_MTHD_V1)
37 struct nv50_disp_sor_hdmi_pwr_v0 v0; 37 struct nv50_disp_sor_hdmi_pwr_v0 v0;
38 } *args = data; 38 } *args = data;
39 u32 ctrl; 39 u32 ctrl;
40 int ret; 40 int ret = -ENOSYS;
41 41
42 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 42 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, false)) { 43 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
44 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 44 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
45 "max_ac_packet %d rekey %d\n", 45 "max_ac_packet %d rekey %d\n",
46 args->v0.version, args->v0.state, 46 args->v0.version, args->v0.state,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
index 110dc19e4f67..4e8067d511d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
@@ -25,7 +25,7 @@
25 25
26#include <core/client.h> 26#include <core/client.h>
27 27
28#include <nvif/class.h> 28#include <nvif/cl5070.h>
29#include <nvif/unpack.h> 29#include <nvif/unpack.h>
30 30
31int 31int
@@ -38,10 +38,10 @@ gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
38 struct nv50_disp_sor_hdmi_pwr_v0 v0; 38 struct nv50_disp_sor_hdmi_pwr_v0 v0;
39 } *args = data; 39 } *args = data;
40 u32 ctrl; 40 u32 ctrl;
41 int ret; 41 int ret = -ENOSYS;
42 42
43 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 43 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
44 if (nvif_unpack(args->v0, 0, 0, false)) { 44 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
45 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 45 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
46 "max_ac_packet %d rekey %d\n", 46 "max_ac_packet %d rekey %d\n",
47 args->v0.version, args->v0.state, 47 args->v0.version, args->v0.state,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
index 61237dbfa35a..f1afc16494b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
@@ -26,7 +26,7 @@
26 26
27#include <core/client.h> 27#include <core/client.h>
28 28
29#include <nvif/class.h> 29#include <nvif/cl5070.h>
30#include <nvif/unpack.h> 30#include <nvif/unpack.h>
31 31
32int 32int
@@ -38,10 +38,10 @@ gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
38 struct nv50_disp_sor_hdmi_pwr_v0 v0; 38 struct nv50_disp_sor_hdmi_pwr_v0 v0;
39 } *args = data; 39 } *args = data;
40 u32 ctrl; 40 u32 ctrl;
41 int ret; 41 int ret = -ENOSYS;
42 42
43 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 43 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
44 if (nvif_unpack(args->v0, 0, 0, false)) { 44 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
45 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 45 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
46 "max_ac_packet %d rekey %d\n", 46 "max_ac_packet %d rekey %d\n",
47 args->v0.version, args->v0.state, 47 args->v0.version, args->v0.state,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 32e73a975b58..4226d2153b9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -391,7 +391,7 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
391 switch (outp->info.type) { 391 switch (outp->info.type) {
392 case DCB_OUTPUT_TMDS: 392 case DCB_OUTPUT_TMDS:
393 *conf = (ctrl & 0x00000f00) >> 8; 393 *conf = (ctrl & 0x00000f00) >> 8;
394 if (pclk >= 165000) 394 if (*conf == 5)
395 *conf |= 0x0100; 395 *conf |= 0x0100;
396 break; 396 break;
397 case DCB_OUTPUT_LVDS: 397 case DCB_OUTPUT_LVDS:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index cd888a1e443c..3940b9c966ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -27,6 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl507b.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32int 33int
@@ -41,10 +42,10 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
41 } *args = data; 42 } *args = data;
42 struct nvkm_object *parent = oclass->parent; 43 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp; 44 struct nv50_disp *disp = root->disp;
44 int head, ret; 45 int head, ret = -ENOSYS;
45 46
46 nvif_ioctl(parent, "create disp overlay size %d\n", size); 47 nvif_ioctl(parent, "create disp overlay size %d\n", size);
47 if (nvif_unpack(args->v0, 0, 0, false)) { 48 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
48 nvif_ioctl(parent, "create disp overlay vers %d head %d\n", 49 nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
49 args->v0.version, args->v0.head); 50 args->v0.version, args->v0.head);
50 if (args->v0.head > disp->base.head.nr) 51 if (args->v0.head > disp->base.head.nr)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
index 6fa296c047b8..2a49c46425cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -27,6 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl507e.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32int 33int
@@ -41,11 +42,11 @@ nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
41 } *args = data; 42 } *args = data;
42 struct nvkm_object *parent = oclass->parent; 43 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp; 44 struct nv50_disp *disp = root->disp;
44 int head, ret; 45 int head, ret = -ENOSYS;
45 u64 push; 46 u64 push;
46 47
47 nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size); 48 nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) { 49 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
49 nvif_ioctl(parent, "create disp overlay channel dma vers %d " 50 nvif_ioctl(parent, "create disp overlay channel dma vers %d "
50 "pushbuf %016llx head %d\n", 51 "pushbuf %016llx head %d\n",
51 args->v0.version, args->v0.pushbuf, args->v0.head); 52 args->v0.version, args->v0.pushbuf, args->v0.head);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index ab524bde7795..6c532eadba17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -28,7 +28,7 @@
28#include <subdev/i2c.h> 28#include <subdev/i2c.h>
29#include <subdev/timer.h> 29#include <subdev/timer.h>
30 30
31#include <nvif/class.h> 31#include <nvif/cl5070.h>
32#include <nvif/unpack.h> 32#include <nvif/unpack.h>
33 33
34int 34int
@@ -40,10 +40,10 @@ nv50_pior_power(NV50_DISP_MTHD_V1)
40 struct nv50_disp_pior_pwr_v0 v0; 40 struct nv50_disp_pior_pwr_v0 v0;
41 } *args = data; 41 } *args = data;
42 u32 ctrl, type; 42 u32 ctrl, type;
43 int ret; 43 int ret = -ENOSYS;
44 44
45 nvif_ioctl(object, "disp pior pwr size %d\n", size); 45 nvif_ioctl(object, "disp pior pwr size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, false)) { 46 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
47 nvif_ioctl(object, "disp pior pwr vers %d state %d type %x\n", 47 nvif_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
48 args->v0.version, args->v0.state, args->v0.type); 48 args->v0.version, args->v0.state, args->v0.type);
49 if (args->v0.type > 0x0f) 49 if (args->v0.type > 0x0f)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index 8591726871ac..335d88823c22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -29,6 +29,7 @@
29#include <subdev/timer.h> 29#include <subdev/timer.h>
30 30
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/cl5070.h>
32#include <nvif/unpack.h> 33#include <nvif/unpack.h>
33 34
34int 35int
@@ -39,12 +40,12 @@ gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
39 const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300)); 40 const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300));
40 const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300)); 41 const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300));
41 union { 42 union {
42 struct nv04_disp_scanoutpos_v0 v0; 43 struct nv50_disp_scanoutpos_v0 v0;
43 } *args = data; 44 } *args = data;
44 int ret; 45 int ret = -ENOSYS;
45 46
46 nvif_ioctl(object, "disp scanoutpos size %d\n", size); 47 nvif_ioctl(object, "disp scanoutpos size %d\n", size);
47 if (nvif_unpack(args->v0, 0, 0, false)) { 48 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
48 nvif_ioctl(object, "disp scanoutpos vers %d\n", 49 nvif_ioctl(object, "disp scanoutpos vers %d\n",
49 args->v0.version); 50 args->v0.version);
50 args->v0.vblanke = (blanke & 0xffff0000) >> 16; 51 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
index 2be846374d39..f535f43231e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -27,6 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl0046.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32struct nv04_disp_root { 33struct nv04_disp_root {
@@ -45,10 +46,10 @@ nv04_disp_scanoutpos(struct nv04_disp_root *root,
45 struct nv04_disp_scanoutpos_v0 v0; 46 struct nv04_disp_scanoutpos_v0 v0;
46 } *args = data; 47 } *args = data;
47 u32 line; 48 u32 line;
48 int ret; 49 int ret = -ENOSYS;
49 50
50 nvif_ioctl(object, "disp scanoutpos size %d\n", size); 51 nvif_ioctl(object, "disp scanoutpos size %d\n", size);
51 if (nvif_unpack(args->v0, 0, 0, false)) { 52 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
52 nvif_ioctl(object, "disp scanoutpos vers %d\n", 53 nvif_ioctl(object, "disp scanoutpos vers %d\n",
53 args->v0.version); 54 args->v0.version);
54 args->v0.vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0xffff; 55 args->v0.vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0xffff;
@@ -85,10 +86,10 @@ nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
85 union { 86 union {
86 struct nv04_disp_mthd_v0 v0; 87 struct nv04_disp_mthd_v0 v0;
87 } *args = data; 88 } *args = data;
88 int head, ret; 89 int head, ret = -ENOSYS;
89 90
90 nvif_ioctl(object, "disp mthd size %d\n", size); 91 nvif_ioctl(object, "disp mthd size %d\n", size);
91 if (nvif_unpack(args->v0, 0, 0, true)) { 92 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
92 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", 93 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
93 args->v0.version, args->v0.method, args->v0.head); 94 args->v0.version, args->v0.method, args->v0.head);
94 mthd = args->v0.method; 95 mthd = args->v0.method;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 06fb24d88702..2f9cecd81d04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -29,6 +29,7 @@
29#include <subdev/timer.h> 29#include <subdev/timer.h>
30 30
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/cl5070.h>
32#include <nvif/unpack.h> 33#include <nvif/unpack.h>
33 34
34int 35int
@@ -39,12 +40,12 @@ nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
39 const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540)); 40 const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
40 const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540)); 41 const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540));
41 union { 42 union {
42 struct nv04_disp_scanoutpos_v0 v0; 43 struct nv50_disp_scanoutpos_v0 v0;
43 } *args = data; 44 } *args = data;
44 int ret; 45 int ret = -ENOSYS;
45 46
46 nvif_ioctl(object, "disp scanoutpos size %d\n", size); 47 nvif_ioctl(object, "disp scanoutpos size %d\n", size);
47 if (nvif_unpack(args->v0, 0, 0, false)) { 48 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
48 nvif_ioctl(object, "disp scanoutpos vers %d\n", 49 nvif_ioctl(object, "disp scanoutpos vers %d\n",
49 args->v0.version); 50 args->v0.version);
50 args->v0.vblanke = (blanke & 0xffff0000) >> 16; 51 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
@@ -78,19 +79,19 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
78 struct nvkm_output *outp = NULL; 79 struct nvkm_output *outp = NULL;
79 struct nvkm_output *temp; 80 struct nvkm_output *temp;
80 u16 type, mask = 0; 81 u16 type, mask = 0;
81 int head, ret; 82 int head, ret = -ENOSYS;
82 83
83 if (mthd != NV50_DISP_MTHD) 84 if (mthd != NV50_DISP_MTHD)
84 return -EINVAL; 85 return -EINVAL;
85 86
86 nvif_ioctl(object, "disp mthd size %d\n", size); 87 nvif_ioctl(object, "disp mthd size %d\n", size);
87 if (nvif_unpack(args->v0, 0, 0, true)) { 88 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
88 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", 89 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
89 args->v0.version, args->v0.method, args->v0.head); 90 args->v0.version, args->v0.method, args->v0.head);
90 mthd = args->v0.method; 91 mthd = args->v0.method;
91 head = args->v0.head; 92 head = args->v0.head;
92 } else 93 } else
93 if (nvif_unpack(args->v1, 1, 1, true)) { 94 if (!(ret = nvif_unpack(ret, &data, &size, args->v1, 1, 1, true))) {
94 nvif_ioctl(object, "disp mthd vers %d mthd %02x " 95 nvif_ioctl(object, "disp mthd vers %d mthd %02x "
95 "type %04x mask %04x\n", 96 "type %04x mask %04x\n",
96 args->v1.version, args->v1.method, 97 args->v1.version, args->v1.method,
@@ -143,8 +144,9 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
143 union { 144 union {
144 struct nv50_disp_sor_lvds_script_v0 v0; 145 struct nv50_disp_sor_lvds_script_v0 v0;
145 } *args = data; 146 } *args = data;
147 int ret = -ENOSYS;
146 nvif_ioctl(object, "disp sor lvds script size %d\n", size); 148 nvif_ioctl(object, "disp sor lvds script size %d\n", size);
147 if (nvif_unpack(args->v0, 0, 0, false)) { 149 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
148 nvif_ioctl(object, "disp sor lvds script " 150 nvif_ioctl(object, "disp sor lvds script "
149 "vers %d name %04x\n", 151 "vers %d name %04x\n",
150 args->v0.version, args->v0.script); 152 args->v0.version, args->v0.script);
@@ -159,8 +161,9 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
159 union { 161 union {
160 struct nv50_disp_sor_dp_pwr_v0 v0; 162 struct nv50_disp_sor_dp_pwr_v0 v0;
161 } *args = data; 163 } *args = data;
164 int ret = -ENOSYS;
162 nvif_ioctl(object, "disp sor dp pwr size %d\n", size); 165 nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
163 if (nvif_unpack(args->v0, 0, 0, false)) { 166 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
164 nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n", 167 nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
165 args->v0.version, args->v0.state); 168 args->v0.version, args->v0.state);
166 if (args->v0.state == 0) { 169 if (args->v0.state == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index 29e0d2a9a839..53596bed3c36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -27,7 +27,7 @@
27#include <core/client.h> 27#include <core/client.h>
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29 29
30#include <nvif/class.h> 30#include <nvif/cl5070.h>
31#include <nvif/unpack.h> 31#include <nvif/unpack.h>
32 32
33int 33int
@@ -39,10 +39,10 @@ nv50_sor_power(NV50_DISP_MTHD_V1)
39 } *args = data; 39 } *args = data;
40 const u32 soff = outp->or * 0x800; 40 const u32 soff = outp->or * 0x800;
41 u32 stat; 41 u32 stat;
42 int ret; 42 int ret = -ENOSYS;
43 43
44 nvif_ioctl(object, "disp sor pwr size %d\n", size); 44 nvif_ioctl(object, "disp sor pwr size %d\n", size);
45 if (nvif_unpack(args->v0, 0, 0, false)) { 45 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
46 nvif_ioctl(object, "disp sor pwr vers %d state %d\n", 46 nvif_ioctl(object, "disp sor pwr vers %d state %d\n",
47 args->v0.version, args->v0.state); 47 args->v0.version, args->v0.state);
48 stat = !!args->v0.state; 48 stat = !!args->v0.state;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index 45ab062661a4..13c661b1ef14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -28,7 +28,7 @@
28#include <subdev/fb.h> 28#include <subdev/fb.h>
29#include <subdev/instmem.h> 29#include <subdev/instmem.h>
30 30
31#include <nvif/class.h> 31#include <nvif/cl0002.h>
32#include <nvif/unpack.h> 32#include <nvif/unpack.h>
33 33
34static int 34static int
@@ -69,7 +69,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
69 struct nvkm_fb *fb = device->fb; 69 struct nvkm_fb *fb = device->fb;
70 void *data = *pdata; 70 void *data = *pdata;
71 u32 size = *psize; 71 u32 size = *psize;
72 int ret; 72 int ret = -ENOSYS;
73 73
74 nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object); 74 nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
75 dmaobj->func = func; 75 dmaobj->func = func;
@@ -77,7 +77,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
77 RB_CLEAR_NODE(&dmaobj->rb); 77 RB_CLEAR_NODE(&dmaobj->rb);
78 78
79 nvif_ioctl(parent, "create dma size %d\n", *psize); 79 nvif_ioctl(parent, "create dma size %d\n", *psize);
80 if (nvif_unpack(args->v0, 0, 0, true)) { 80 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
81 nvif_ioctl(parent, "create dma vers %d target %d access %d " 81 nvif_ioctl(parent, "create dma vers %d target %d access %d "
82 "start %016llx limit %016llx\n", 82 "start %016llx limit %016llx\n",
83 args->v0.version, args->v0.target, args->v0.access, 83 args->v0.version, args->v0.target, args->v0.access,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
index 13e341cc4e32..ef7ac360101e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
@@ -28,7 +28,7 @@
28#include <core/gpuobj.h> 28#include <core/gpuobj.h>
29#include <subdev/fb.h> 29#include <subdev/fb.h>
30 30
31#include <nvif/class.h> 31#include <nvif/cl0002.h>
32#include <nvif/unpack.h> 32#include <nvif/unpack.h>
33 33
34struct gf100_dmaobj { 34struct gf100_dmaobj {
@@ -87,10 +87,11 @@ gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
87 if (ret) 87 if (ret)
88 return ret; 88 return ret;
89 89
90 ret = -ENOSYS;
90 args = data; 91 args = data;
91 92
92 nvif_ioctl(parent, "create gf100 dma size %d\n", size); 93 nvif_ioctl(parent, "create gf100 dma size %d\n", size);
93 if (nvif_unpack(args->v0, 0, 0, false)) { 94 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
94 nvif_ioctl(parent, 95 nvif_ioctl(parent,
95 "create gf100 dma vers %d priv %d kind %02x\n", 96 "create gf100 dma vers %d priv %d kind %02x\n",
96 args->v0.version, args->v0.priv, args->v0.kind); 97 args->v0.version, args->v0.priv, args->v0.kind);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
index 0e1af8b4db84..c068cee34588 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
@@ -28,7 +28,7 @@
28#include <core/gpuobj.h> 28#include <core/gpuobj.h>
29#include <subdev/fb.h> 29#include <subdev/fb.h>
30 30
31#include <nvif/class.h> 31#include <nvif/cl0002.h>
32#include <nvif/unpack.h> 32#include <nvif/unpack.h>
33 33
34struct gf119_dmaobj { 34struct gf119_dmaobj {
@@ -85,10 +85,11 @@ gf119_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
85 if (ret) 85 if (ret)
86 return ret; 86 return ret;
87 87
88 ret = -ENOSYS;
88 args = data; 89 args = data;
89 90
90 nvif_ioctl(parent, "create gf119 dma size %d\n", size); 91 nvif_ioctl(parent, "create gf119 dma size %d\n", size);
91 if (nvif_unpack(args->v0, 0, 0, false)) { 92 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
92 nvif_ioctl(parent, 93 nvif_ioctl(parent,
93 "create gf100 dma vers %d page %d kind %02x\n", 94 "create gf100 dma vers %d page %d kind %02x\n",
94 args->v0.version, args->v0.page, args->v0.kind); 95 args->v0.version, args->v0.page, args->v0.kind);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
index 5b7ce313ea14..6a85b5dea643 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
@@ -28,7 +28,7 @@
28#include <core/gpuobj.h> 28#include <core/gpuobj.h>
29#include <subdev/fb.h> 29#include <subdev/fb.h>
30 30
31#include <nvif/class.h> 31#include <nvif/cl0002.h>
32#include <nvif/unpack.h> 32#include <nvif/unpack.h>
33 33
34struct nv50_dmaobj { 34struct nv50_dmaobj {
@@ -87,10 +87,11 @@ nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
87 if (ret) 87 if (ret)
88 return ret; 88 return ret;
89 89
90 ret = -ENOSYS;
90 args = data; 91 args = data;
91 92
92 nvif_ioctl(parent, "create nv50 dma size %d\n", size); 93 nvif_ioctl(parent, "create nv50 dma size %d\n", size);
93 if (nvif_unpack(args->v0, 0, 0, false)) { 94 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
94 nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d " 95 nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
95 "comp %d kind %02x\n", args->v0.version, 96 "comp %d kind %02x\n", args->v0.version,
96 args->v0.priv, args->v0.part, args->v0.comp, 97 args->v0.priv, args->v0.part, args->v0.comp,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 1fbbfbe6ca9c..cfc7d5725a61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -129,9 +129,9 @@ nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
129 union { 129 union {
130 struct nvif_notify_uevent_req none; 130 struct nvif_notify_uevent_req none;
131 } *req = data; 131 } *req = data;
132 int ret; 132 int ret = -ENOSYS;
133 133
134 if (nvif_unvers(req->none)) { 134 if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
135 notify->size = sizeof(struct nvif_notify_uevent_rep); 135 notify->size = sizeof(struct nvif_notify_uevent_rep);
136 notify->types = 1; 136 notify->types = 1;
137 notify->index = 0; 137 notify->index = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index 04305241ceed..aeb3387a3fb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -28,7 +28,7 @@
28#include <subdev/mmu.h> 28#include <subdev/mmu.h>
29#include <subdev/timer.h> 29#include <subdev/timer.h>
30 30
31#include <nvif/class.h> 31#include <nvif/cl826e.h>
32 32
33int 33int
34g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type, 34g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
index a5ca52c7b74f..caa914074752 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -27,6 +27,7 @@
27#include <core/ramht.h> 27#include <core/ramht.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl826e.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32static int 33static int
@@ -35,14 +36,14 @@ g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
35{ 36{
36 struct nvkm_object *parent = oclass->parent; 37 struct nvkm_object *parent = oclass->parent;
37 union { 38 union {
38 struct nv50_channel_dma_v0 v0; 39 struct g82_channel_dma_v0 v0;
39 } *args = data; 40 } *args = data;
40 struct nv50_fifo *fifo = nv50_fifo(base); 41 struct nv50_fifo *fifo = nv50_fifo(base);
41 struct nv50_fifo_chan *chan; 42 struct nv50_fifo_chan *chan;
42 int ret; 43 int ret = -ENOSYS;
43 44
44 nvif_ioctl(parent, "create channel dma size %d\n", size); 45 nvif_ioctl(parent, "create channel dma size %d\n", size);
45 if (nvif_unpack(args->v0, 0, 0, false)) { 46 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
46 nvif_ioctl(parent, "create channel dma vers %d vm %llx " 47 nvif_ioctl(parent, "create channel dma vers %d vm %llx "
47 "pushbuf %llx offset %016llx\n", 48 "pushbuf %llx offset %016llx\n",
48 args->v0.version, args->v0.vm, args->v0.pushbuf, 49 args->v0.version, args->v0.vm, args->v0.pushbuf,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index bfcc6408a772..edec30fd3ecd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -29,6 +29,7 @@
29#include <subdev/instmem.h> 29#include <subdev/instmem.h>
30 30
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/cl006b.h>
32#include <nvif/unpack.h> 33#include <nvif/unpack.h>
33 34
34void 35void
@@ -167,10 +168,10 @@ nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
167 struct nv04_fifo_chan *chan = NULL; 168 struct nv04_fifo_chan *chan = NULL;
168 struct nvkm_device *device = fifo->base.engine.subdev.device; 169 struct nvkm_device *device = fifo->base.engine.subdev.device;
169 struct nvkm_instmem *imem = device->imem; 170 struct nvkm_instmem *imem = device->imem;
170 int ret; 171 int ret = -ENOSYS;
171 172
172 nvif_ioctl(parent, "create channel dma size %d\n", size); 173 nvif_ioctl(parent, "create channel dma size %d\n", size);
173 if (nvif_unpack(args->v0, 0, 0, false)) { 174 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
174 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " 175 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
175 "offset %08x\n", args->v0.version, 176 "offset %08x\n", args->v0.version,
176 args->v0.pushbuf, args->v0.offset); 177 args->v0.pushbuf, args->v0.offset);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
index 34f68e5bd040..f5f355ff005d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -29,6 +29,7 @@
29#include <subdev/instmem.h> 29#include <subdev/instmem.h>
30 30
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/cl006b.h>
32#include <nvif/unpack.h> 33#include <nvif/unpack.h>
33 34
34static int 35static int
@@ -43,10 +44,10 @@ nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
43 struct nv04_fifo_chan *chan = NULL; 44 struct nv04_fifo_chan *chan = NULL;
44 struct nvkm_device *device = fifo->base.engine.subdev.device; 45 struct nvkm_device *device = fifo->base.engine.subdev.device;
45 struct nvkm_instmem *imem = device->imem; 46 struct nvkm_instmem *imem = device->imem;
46 int ret; 47 int ret = -ENOSYS;
47 48
48 nvif_ioctl(parent, "create channel dma size %d\n", size); 49 nvif_ioctl(parent, "create channel dma size %d\n", size);
49 if (nvif_unpack(args->v0, 0, 0, false)) { 50 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
50 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " 51 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
51 "offset %08x\n", args->v0.version, 52 "offset %08x\n", args->v0.version,
52 args->v0.pushbuf, args->v0.offset); 53 args->v0.pushbuf, args->v0.offset);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
index ed7cc9f2b540..7edc6a564b5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -29,6 +29,7 @@
29#include <subdev/instmem.h> 29#include <subdev/instmem.h>
30 30
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/cl006b.h>
32#include <nvif/unpack.h> 33#include <nvif/unpack.h>
33 34
34static int 35static int
@@ -43,10 +44,10 @@ nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
43 struct nv04_fifo_chan *chan = NULL; 44 struct nv04_fifo_chan *chan = NULL;
44 struct nvkm_device *device = fifo->base.engine.subdev.device; 45 struct nvkm_device *device = fifo->base.engine.subdev.device;
45 struct nvkm_instmem *imem = device->imem; 46 struct nvkm_instmem *imem = device->imem;
46 int ret; 47 int ret = -ENOSYS;
47 48
48 nvif_ioctl(parent, "create channel dma size %d\n", size); 49 nvif_ioctl(parent, "create channel dma size %d\n", size);
49 if (nvif_unpack(args->v0, 0, 0, false)) { 50 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
50 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " 51 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
51 "offset %08x\n", args->v0.version, 52 "offset %08x\n", args->v0.version,
52 args->v0.pushbuf, args->v0.offset); 53 args->v0.pushbuf, args->v0.offset);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
index 043b6c325949..0ec179fc40a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -29,6 +29,7 @@
29#include <subdev/instmem.h> 29#include <subdev/instmem.h>
30 30
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/cl006b.h>
32#include <nvif/unpack.h> 33#include <nvif/unpack.h>
33 34
34static bool 35static bool
@@ -188,10 +189,10 @@ nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
188 struct nv04_fifo_chan *chan = NULL; 189 struct nv04_fifo_chan *chan = NULL;
189 struct nvkm_device *device = fifo->base.engine.subdev.device; 190 struct nvkm_device *device = fifo->base.engine.subdev.device;
190 struct nvkm_instmem *imem = device->imem; 191 struct nvkm_instmem *imem = device->imem;
191 int ret; 192 int ret = -ENOSYS;
192 193
193 nvif_ioctl(parent, "create channel dma size %d\n", size); 194 nvif_ioctl(parent, "create channel dma size %d\n", size);
194 if (nvif_unpack(args->v0, 0, 0, false)) { 195 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
195 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " 196 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
196 "offset %08x\n", args->v0.version, 197 "offset %08x\n", args->v0.version,
197 args->v0.pushbuf, args->v0.offset); 198 args->v0.pushbuf, args->v0.offset);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
index 6b3b15f12c39..480bc3777be5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -27,6 +27,7 @@
27#include <core/ramht.h> 27#include <core/ramht.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl506e.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32static int 33static int
@@ -39,10 +40,10 @@ nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
39 } *args = data; 40 } *args = data;
40 struct nv50_fifo *fifo = nv50_fifo(base); 41 struct nv50_fifo *fifo = nv50_fifo(base);
41 struct nv50_fifo_chan *chan; 42 struct nv50_fifo_chan *chan;
42 int ret; 43 int ret = -ENOSYS;
43 44
44 nvif_ioctl(parent, "create channel dma size %d\n", size); 45 nvif_ioctl(parent, "create channel dma size %d\n", size);
45 if (nvif_unpack(args->v0, 0, 0, false)) { 46 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
46 nvif_ioctl(parent, "create channel dma vers %d vm %llx " 47 nvif_ioctl(parent, "create channel dma vers %d vm %llx "
47 "pushbuf %llx offset %016llx\n", 48 "pushbuf %llx offset %016llx\n",
48 args->v0.version, args->v0.vm, args->v0.pushbuf, 49 args->v0.version, args->v0.vm, args->v0.pushbuf,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index ff6fcbda615b..36a39c7fd8d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -47,7 +47,7 @@ gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
47} 47}
48 48
49void 49void
50gf100_fifo_runlist_update(struct gf100_fifo *fifo) 50gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
51{ 51{
52 struct gf100_fifo_chan *chan; 52 struct gf100_fifo_chan *chan;
53 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 53 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -77,6 +77,22 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
77 mutex_unlock(&subdev->mutex); 77 mutex_unlock(&subdev->mutex);
78} 78}
79 79
80void
81gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
82{
83 mutex_lock(&fifo->base.engine.subdev.mutex);
84 list_del_init(&chan->head);
85 mutex_unlock(&fifo->base.engine.subdev.mutex);
86}
87
88void
89gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
90{
91 mutex_lock(&fifo->base.engine.subdev.mutex);
92 list_add_tail(&chan->head, &fifo->chan);
93 mutex_unlock(&fifo->base.engine.subdev.mutex);
94}
95
80static inline int 96static inline int
81gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn) 97gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
82{ 98{
@@ -139,7 +155,7 @@ gf100_fifo_recover_work(struct work_struct *work)
139 } 155 }
140 } 156 }
141 157
142 gf100_fifo_runlist_update(fifo); 158 gf100_fifo_runlist_commit(fifo);
143 nvkm_wr32(device, 0x00262c, engm); 159 nvkm_wr32(device, 0x00262c, engm);
144 nvkm_mask(device, 0x002630, engm, 0x00000000); 160 nvkm_mask(device, 0x002630, engm, 0x00000000);
145} 161}
@@ -239,7 +255,7 @@ gf100_fifo_fault_engine[] = {
239 { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC }, 255 { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
240 { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 }, 256 { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
241 { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 }, 257 { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
242 { 0x17, "PDAEMON" }, 258 { 0x17, "PMU" },
243 {} 259 {}
244}; 260};
245 261
@@ -270,7 +286,7 @@ gf100_fifo_fault_hubclient[] = {
270 { 0x0c, "PMSPPP" }, 286 { 0x0c, "PMSPPP" },
271 { 0x0d, "PMSVLD" }, 287 { 0x0d, "PMSVLD" },
272 { 0x11, "PCOUNTER" }, 288 { 0x11, "PCOUNTER" },
273 { 0x12, "PDAEMON" }, 289 { 0x12, "PMU" },
274 { 0x14, "CCACHE" }, 290 { 0x14, "CCACHE" },
275 { 0x15, "CCACHE_POST" }, 291 { 0x15, "CCACHE_POST" },
276 {} 292 {}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index c649ca9b53e3..08c33c3ceaf7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -5,6 +5,7 @@
5 5
6#include <subdev/mmu.h> 6#include <subdev/mmu.h>
7 7
8struct gf100_fifo_chan;
8struct gf100_fifo { 9struct gf100_fifo {
9 struct nvkm_fifo base; 10 struct nvkm_fifo base;
10 11
@@ -27,5 +28,7 @@ struct gf100_fifo {
27}; 28};
28 29
29void gf100_fifo_intr_engine(struct gf100_fifo *); 30void gf100_fifo_intr_engine(struct gf100_fifo *);
30void gf100_fifo_runlist_update(struct gf100_fifo *); 31void gf100_fifo_runlist_insert(struct gf100_fifo *, struct gf100_fifo_chan *);
32void gf100_fifo_runlist_remove(struct gf100_fifo *, struct gf100_fifo_chan *);
33void gf100_fifo_runlist_commit(struct gf100_fifo *);
31#endif 34#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 98970a0b7a66..4fcd147d43c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -47,7 +47,7 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
47} 47}
48 48
49void 49void
50gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) 50gk104_fifo_runlist_commit(struct gk104_fifo *fifo, u32 engine)
51{ 51{
52 struct gk104_fifo_engn *engn = &fifo->engine[engine]; 52 struct gk104_fifo_engn *engn = &fifo->engine[engine];
53 struct gk104_fifo_chan *chan; 53 struct gk104_fifo_chan *chan;
@@ -78,6 +78,22 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
78 mutex_unlock(&subdev->mutex); 78 mutex_unlock(&subdev->mutex);
79} 79}
80 80
81void
82gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
83{
84 mutex_lock(&fifo->base.engine.subdev.mutex);
85 list_del_init(&chan->head);
86 mutex_unlock(&fifo->base.engine.subdev.mutex);
87}
88
89void
90gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
91{
92 mutex_lock(&fifo->base.engine.subdev.mutex);
93 list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
94 mutex_unlock(&fifo->base.engine.subdev.mutex);
95}
96
81static inline struct nvkm_engine * 97static inline struct nvkm_engine *
82gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) 98gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
83{ 99{
@@ -112,7 +128,7 @@ gk104_fifo_recover_work(struct work_struct *work)
112 nvkm_subdev_fini(&engine->subdev, false); 128 nvkm_subdev_fini(&engine->subdev, false);
113 WARN_ON(nvkm_subdev_init(&engine->subdev)); 129 WARN_ON(nvkm_subdev_init(&engine->subdev));
114 } 130 }
115 gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn)); 131 gk104_fifo_runlist_commit(fifo, gk104_fifo_subdev_engine(engn));
116 } 132 }
117 133
118 nvkm_wr32(device, 0x00262c, engm); 134 nvkm_wr32(device, 0x00262c, engm);
@@ -180,7 +196,7 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
180 196
181 spin_lock_irqsave(&fifo->base.lock, flags); 197 spin_lock_irqsave(&fifo->base.lock, flags);
182 for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) { 198 for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
183 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); 199 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
184 u32 busy = (stat & 0x80000000); 200 u32 busy = (stat & 0x80000000);
185 u32 next = (stat & 0x07ff0000) >> 16; 201 u32 next = (stat & 0x07ff0000) >> 16;
186 u32 chsw = (stat & 0x00008000); 202 u32 chsw = (stat & 0x00008000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 5afd9b5ec5d1..bec519d8f91e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -5,6 +5,7 @@
5 5
6#include <subdev/mmu.h> 6#include <subdev/mmu.h>
7 7
8struct gk104_fifo_chan;
8struct gk104_fifo_engn { 9struct gk104_fifo_engn {
9 struct nvkm_memory *runlist[2]; 10 struct nvkm_memory *runlist[2];
10 int cur_runlist; 11 int cur_runlist;
@@ -35,7 +36,9 @@ void gk104_fifo_fini(struct nvkm_fifo *);
35void gk104_fifo_intr(struct nvkm_fifo *); 36void gk104_fifo_intr(struct nvkm_fifo *);
36void gk104_fifo_uevent_init(struct nvkm_fifo *); 37void gk104_fifo_uevent_init(struct nvkm_fifo *);
37void gk104_fifo_uevent_fini(struct nvkm_fifo *); 38void gk104_fifo_uevent_fini(struct nvkm_fifo *);
38void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine); 39void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
40void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
41void gk104_fifo_runlist_commit(struct gk104_fifo *, u32 engine);
39 42
40static inline u64 43static inline u64
41gk104_fifo_engine_subdev(int engine) 44gk104_fifo_engine_subdev(int engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
index 820132363f68..77c2f2a28bf3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -27,6 +27,7 @@
27#include <core/ramht.h> 27#include <core/ramht.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl826f.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32static int 33static int
@@ -35,15 +36,15 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
35{ 36{
36 struct nvkm_object *parent = oclass->parent; 37 struct nvkm_object *parent = oclass->parent;
37 union { 38 union {
38 struct nv50_channel_gpfifo_v0 v0; 39 struct g82_channel_gpfifo_v0 v0;
39 } *args = data; 40 } *args = data;
40 struct nv50_fifo *fifo = nv50_fifo(base); 41 struct nv50_fifo *fifo = nv50_fifo(base);
41 struct nv50_fifo_chan *chan; 42 struct nv50_fifo_chan *chan;
42 u64 ioffset, ilength; 43 u64 ioffset, ilength;
43 int ret; 44 int ret = -ENOSYS;
44 45
45 nvif_ioctl(parent, "create channel gpfifo size %d\n", size); 46 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, false)) { 47 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
47 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx " 48 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
48 "pushbuf %llx ioffset %016llx " 49 "pushbuf %llx ioffset %016llx "
49 "ilength %08x\n", 50 "ilength %08x\n",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index e7cbc139c1d4..cbc67f262322 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -29,6 +29,7 @@
29#include <subdev/timer.h> 29#include <subdev/timer.h>
30 30
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/cl906f.h>
32#include <nvif/unpack.h> 33#include <nvif/unpack.h>
33 34
34static u32 35static u32
@@ -138,9 +139,9 @@ gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
138 u32 coff = chan->base.chid * 8; 139 u32 coff = chan->base.chid * 8;
139 140
140 if (!list_empty(&chan->head) && !chan->killed) { 141 if (!list_empty(&chan->head) && !chan->killed) {
141 list_del_init(&chan->head); 142 gf100_fifo_runlist_remove(fifo, chan);
142 nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000); 143 nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
143 gf100_fifo_runlist_update(fifo); 144 gf100_fifo_runlist_commit(fifo);
144 } 145 }
145 146
146 gf100_fifo_intr_engine(fifo); 147 gf100_fifo_intr_engine(fifo);
@@ -160,9 +161,9 @@ gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
160 nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr); 161 nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);
161 162
162 if (list_empty(&chan->head) && !chan->killed) { 163 if (list_empty(&chan->head) && !chan->killed) {
163 list_add_tail(&chan->head, &fifo->chan); 164 gf100_fifo_runlist_insert(fifo, chan);
164 nvkm_wr32(device, 0x003004 + coff, 0x001f0001); 165 nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
165 gf100_fifo_runlist_update(fifo); 166 gf100_fifo_runlist_commit(fifo);
166 } 167 }
167} 168}
168 169
@@ -199,10 +200,10 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
199 struct nvkm_object *parent = oclass->parent; 200 struct nvkm_object *parent = oclass->parent;
200 struct gf100_fifo_chan *chan; 201 struct gf100_fifo_chan *chan;
201 u64 usermem, ioffset, ilength; 202 u64 usermem, ioffset, ilength;
202 int ret, i; 203 int ret = -ENOSYS, i;
203 204
204 nvif_ioctl(parent, "create channel gpfifo size %d\n", size); 205 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
205 if (nvif_unpack(args->v0, 0, 0, false)) { 206 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
206 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx " 207 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
207 "ioffset %016llx ilength %08x\n", 208 "ioffset %016llx ilength %08x\n",
208 args->v0.version, args->v0.vm, args->v0.ioffset, 209 args->v0.version, args->v0.vm, args->v0.ioffset,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 0b817540a9e4..2e1df01bd928 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -30,6 +30,7 @@
30#include <subdev/timer.h> 30#include <subdev/timer.h>
31 31
32#include <nvif/class.h> 32#include <nvif/class.h>
33#include <nvif/cla06f.h>
33#include <nvif/unpack.h> 34#include <nvif/unpack.h>
34 35
35static int 36static int
@@ -151,9 +152,9 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
151 u32 coff = chan->base.chid * 8; 152 u32 coff = chan->base.chid * 8;
152 153
153 if (!list_empty(&chan->head)) { 154 if (!list_empty(&chan->head)) {
154 list_del_init(&chan->head); 155 gk104_fifo_runlist_remove(fifo, chan);
155 nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800); 156 nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
156 gk104_fifo_runlist_update(fifo, chan->engine); 157 gk104_fifo_runlist_commit(fifo, chan->engine);
157 } 158 }
158 159
159 nvkm_wr32(device, 0x800000 + coff, 0x00000000); 160 nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -172,9 +173,9 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
172 nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr); 173 nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
173 174
174 if (list_empty(&chan->head) && !chan->killed) { 175 if (list_empty(&chan->head) && !chan->killed) {
175 list_add_tail(&chan->head, &fifo->engine[chan->engine].chan); 176 gk104_fifo_runlist_insert(fifo, chan);
176 nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400); 177 nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
177 gk104_fifo_runlist_update(fifo, chan->engine); 178 gk104_fifo_runlist_commit(fifo, chan->engine);
178 nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400); 179 nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
179 } 180 }
180} 181}
@@ -213,10 +214,10 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
213 struct gk104_fifo_chan *chan; 214 struct gk104_fifo_chan *chan;
214 u64 usermem, ioffset, ilength; 215 u64 usermem, ioffset, ilength;
215 u32 engines; 216 u32 engines;
216 int ret, i; 217 int ret = -ENOSYS, i;
217 218
218 nvif_ioctl(parent, "create channel gpfifo size %d\n", size); 219 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
219 if (nvif_unpack(args->v0, 0, 0, false)) { 220 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
220 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx " 221 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
221 "ioffset %016llx ilength %08x engine %08x\n", 222 "ioffset %016llx ilength %08x engine %08x\n",
222 args->v0.version, args->v0.vm, args->v0.ioffset, 223 args->v0.version, args->v0.vm, args->v0.ioffset,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
index a8c69f878221..c5a7de9db259 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -27,6 +27,7 @@
27#include <core/ramht.h> 27#include <core/ramht.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/cl506f.h>
30#include <nvif/unpack.h> 31#include <nvif/unpack.h>
31 32
32static int 33static int
@@ -40,10 +41,10 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
40 struct nv50_fifo *fifo = nv50_fifo(base); 41 struct nv50_fifo *fifo = nv50_fifo(base);
41 struct nv50_fifo_chan *chan; 42 struct nv50_fifo_chan *chan;
42 u64 ioffset, ilength; 43 u64 ioffset, ilength;
43 int ret; 44 int ret = -ENOSYS;
44 45
45 nvif_ioctl(parent, "create channel gpfifo size %d\n", size); 46 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, false)) { 47 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
47 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx " 48 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
48 "pushbuf %llx ioffset %016llx " 49 "pushbuf %llx ioffset %016llx "
49 "ilength %08x\n", 50 "ilength %08x\n",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index ddaa16a71c84..ad0a6cfe7580 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -55,7 +55,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
55 55
56 gk104_grctx_generate_rop_active_fbps(gr); 56 gk104_grctx_generate_rop_active_fbps(gr);
57 57
58 nvkm_mask(device, 0x5044b0, 0x8000000, 0x8000000); 58 nvkm_mask(device, 0x5044b0, 0x08000000, 0x08000000);
59 59
60 gf100_gr_wait_idle(gr); 60 gf100_gr_wait_idle(gr);
61 61
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index 7dacb3cc0668..e168b83a10c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -247,10 +247,7 @@ init:
247 tpc_strand_info(-1); 247 tpc_strand_info(-1);
248 248
249 ld b32 $r4 D[$r0 + #tpc_count] 249 ld b32 $r4 D[$r0 + #tpc_count]
250 mov $r5 NV_PGRAPH_GPC0_TPC0 250 gpc_addr($r5, NV_PGRAPH_GPC0_TPC0)
251 ld b32 $r6 D[$r0 + #gpc_id]
252 shl b32 $r6 15
253 add b32 $r5 $r6
254 tpc_strand_init_tpc_loop: 251 tpc_strand_init_tpc_loop:
255 add b32 $r14 $r5 NV_TPC_STRAND_CNT 252 add b32 $r14 $r5 NV_TPC_STRAND_CNT
256 call(nv_rd32) 253 call(nv_rd32)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 11bf363a6ae9..5136f9161706 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = {
289 0x020014fe, 289 0x020014fe,
290 0x12004002, 290 0x12004002,
291 0xbd0002f6, 291 0xbd0002f6,
292 0x05b34104, 292 0x05ad4104,
293 0x400010fe, 293 0x400010fe,
294 0x00f60700, 294 0x00f60700,
295 0x0204bd00, 295 0x0204bd00,
@@ -387,180 +387,180 @@ uint32_t gm107_grgpc_code[] = {
387 0x7e00008f, 387 0x7e00008f,
388 0x98000314, 388 0x98000314,
389 0x00850504, 389 0x00850504,
390 0x06985040, 390 0x55f05040,
391 0x0f64b604, 391/* 0x04dd: tpc_strand_init_tpc_loop */
392/* 0x04e3: tpc_strand_init_tpc_loop */ 392 0x705eb801,
393 0xb80056bb, 393 0x657e0005,
394 0x0005705e, 394 0xf6b20000,
395 0x0000657e, 395/* 0x04ea: tpc_strand_init_idx_loop */
396 0x74bdf6b2, 396 0x5eb874bd,
397/* 0x04f0: tpc_strand_init_idx_loop */ 397 0xb2000560,
398 0x05605eb8, 398 0x008f7e7f,
399 0x7e7fb200, 399 0x885eb800,
400 0xb800008f, 400 0x2f950005,
401 0x0005885e, 401 0x008f7e08,
402 0x7e082f95, 402 0x8c5eb800,
403 0xb800008f, 403 0x2f950005,
404 0x00058c5e, 404 0x008f7e08,
405 0x7e082f95, 405 0x905eb800,
406 0xb800008f, 406 0x657e0005,
407 0x0005905e, 407 0xf5b60000,
408 0x0000657e, 408 0x01f0b606,
409 0xb606f5b6, 409 0xbb08f4b6,
410 0xf4b601f0, 410 0x3fbb002f,
411 0x002fbb08, 411 0x0170b600,
412 0xb6003fbb, 412 0xf40162b6,
413 0x62b60170, 413 0x50b7bf1b,
414 0xbf1bf401, 414 0x42b60800,
415 0x080050b7, 415 0xa81bf401,
416 0xf40142b6,
417 0x3f0fa81b,
418 0x501d608e,
419 0xb201e5f0,
420 0x008f7eff,
421 0x8e0d0f00,
422 0xf0501da8,
423 0xffb201e5,
424 0x00008f7e,
425 0x0003147e,
426 0x02010080,
427 0xbd0003f6,
428 0xf024bd04,
429 0x00801f29,
430 0x02f60230,
431/* 0x0577: main */
432 0xf404bd00,
433 0x28f40031,
434 0x7e240d00,
435 0xf4000037,
436 0xe4b0f401,
437 0x1d18f404,
438 0x020181fe,
439 0xfd20bd06,
440 0xe4b60412,
441 0x051efd01,
442 0x7e0018fe,
443 0xf400064a,
444/* 0x05a6: main_not_ctx_xfer */
445 0xef94d40e,
446 0x01f5f010,
447 0x0002f87e,
448/* 0x05b3: ih */
449 0xf9c70ef4,
450 0x0188fe80,
451 0x90f980f9,
452 0xb0f9a0f9,
453 0xe0f9d0f9,
454 0x04bdf0f9,
455 0xcf02004a,
456 0xabc400aa,
457 0x1f0bf404,
458 0x004e240d,
459 0x00eecf1a,
460 0xcf19004f,
461 0x047e00ff,
462 0x010e0000,
463 0xf61d0040,
464 0x04bd000e,
465/* 0x05f0: ih_no_fifo */
466 0xf6010040,
467 0x04bd000a,
468 0xe0fcf0fc,
469 0xb0fcd0fc,
470 0x90fca0fc,
471 0x88fe80fc,
472 0xf480fc00,
473 0x01f80032,
474/* 0x0610: hub_barrier_done */
475 0x0e98010f,
476 0x04febb04,
477 0x188effb2,
478 0x8f7e4094,
479 0x00f80000,
480/* 0x0624: ctx_redswitch */
481 0x0080200f,
482 0x0ff60185,
483 0x0e04bd00,
484/* 0x0631: ctx_redswitch_delay */
485 0x01e2b608,
486 0xf1fd1bf4,
487 0xf10800f5,
488 0x800200f5,
489 0xf6018500,
490 0x04bd000f,
491/* 0x064a: ctx_xfer */
492 0x008000f8,
493 0x0ff60281,
494 0x8e04bd00,
495 0xf0501dc4,
496 0xffb201e5,
497 0x00008f7e,
498 0x7e0711f4,
499/* 0x0667: ctx_xfer_not_load */
500 0x7e000624,
501 0xbd000216,
502 0x47fc8024,
503 0x0002f602,
504 0x2cf004bd,
505 0x0320b601,
506 0x024afc80,
507 0xbd0002f6,
508 0x8e0c0f04,
509 0xf0501da8,
510 0xffb201e5,
511 0x00008f7e,
512 0x0003147e,
513 0x608e3f0f, 416 0x608e3f0f,
514 0xe5f0501d, 417 0xe5f0501d,
515 0x7effb201, 418 0x7effb201,
516 0x0f00008f, 419 0x0f00008f,
517 0x1d9c8e00, 420 0x1da88e0d,
518 0x01e5f050, 421 0x01e5f050,
519 0x8f7effb2, 422 0x8f7effb2,
520 0x010f0000, 423 0x147e0000,
521 0x0003147e, 424 0x00800003,
522 0xb601fcf0, 425 0x03f60201,
523 0xa88e03f0, 426 0xbd04bd00,
524 0xe5f0501d, 427 0x1f29f024,
525 0x7effb201, 428 0x02300080,
526 0xf000008f, 429 0xbd0002f6,
527 0xa5f001ac, 430/* 0x0571: main */
528 0x00008b02, 431 0x0031f404,
529 0x040c9850, 432 0x0d0028f4,
530 0xbb0fc4b6, 433 0x00377e24,
531 0x0c9800bc, 434 0xf401f400,
532 0x010d9800, 435 0xf404e4b0,
533 0x3d7e000e, 436 0x81fe1d18,
534 0xacf00001, 437 0xbd060201,
535 0x40008b01, 438 0x0412fd20,
536 0x040c9850, 439 0xfd01e4b6,
537 0xbb0fc4b6, 440 0x18fe051e,
538 0x0c9800bc, 441 0x06447e00,
539 0x020d9801, 442 0xd40ef400,
540 0x4e060f98, 443/* 0x05a0: main_not_ctx_xfer */
541 0x3d7e0800, 444 0xf010ef94,
542 0xacf00001, 445 0xf87e01f5,
543 0x04a5f001, 446 0x0ef40002,
544 0x5030008b, 447/* 0x05ad: ih */
545 0xb6040c98, 448 0xfe80f9c7,
546 0xbcbb0fc4, 449 0x80f90188,
547 0x020c9800, 450 0xa0f990f9,
548 0x98030d98, 451 0xd0f9b0f9,
549 0x004e080f, 452 0xf0f9e0f9,
550 0x013d7e02, 453 0x004a04bd,
551 0x020a7e00, 454 0x00aacf02,
552 0x03147e00, 455 0xf404abc4,
553 0x0601f400, 456 0x240d1f0b,
554/* 0x073f: ctx_xfer_post */ 457 0xcf1a004e,
555 0x7e1a12f4, 458 0x004f00ee,
556 0x0f000227, 459 0x00ffcf19,
557 0x1da88e0d, 460 0x0000047e,
461 0x0040010e,
462 0x000ef61d,
463/* 0x05ea: ih_no_fifo */
464 0x004004bd,
465 0x000af601,
466 0xf0fc04bd,
467 0xd0fce0fc,
468 0xa0fcb0fc,
469 0x80fc90fc,
470 0xfc0088fe,
471 0x0032f480,
472/* 0x060a: hub_barrier_done */
473 0x010f01f8,
474 0xbb040e98,
475 0xffb204fe,
476 0x4094188e,
477 0x00008f7e,
478/* 0x061e: ctx_redswitch */
479 0x200f00f8,
480 0x01850080,
481 0xbd000ff6,
482/* 0x062b: ctx_redswitch_delay */
483 0xb6080e04,
484 0x1bf401e2,
485 0x00f5f1fd,
486 0x00f5f108,
487 0x85008002,
488 0x000ff601,
489 0x00f804bd,
490/* 0x0644: ctx_xfer */
491 0x02810080,
492 0xbd000ff6,
493 0x1dc48e04,
494 0x01e5f050,
495 0x8f7effb2,
496 0x11f40000,
497 0x061e7e07,
498/* 0x0661: ctx_xfer_not_load */
499 0x02167e00,
500 0x8024bd00,
501 0xf60247fc,
502 0x04bd0002,
503 0xb6012cf0,
504 0xfc800320,
505 0x02f6024a,
506 0x0f04bd00,
507 0x1da88e0c,
558 0x01e5f050, 508 0x01e5f050,
559 0x8f7effb2, 509 0x8f7effb2,
560 0x147e0000, 510 0x147e0000,
561/* 0x0756: ctx_xfer_done */ 511 0x3f0f0003,
562 0x107e0003, 512 0x501d608e,
563 0x00f80006, 513 0xb201e5f0,
514 0x008f7eff,
515 0x8e000f00,
516 0xf0501d9c,
517 0xffb201e5,
518 0x00008f7e,
519 0x147e010f,
520 0xfcf00003,
521 0x03f0b601,
522 0x501da88e,
523 0xb201e5f0,
524 0x008f7eff,
525 0x01acf000,
526 0x8b02a5f0,
527 0x98500000,
528 0xc4b6040c,
529 0x00bcbb0f,
530 0x98000c98,
531 0x000e010d,
532 0x00013d7e,
533 0x8b01acf0,
534 0x98504000,
535 0xc4b6040c,
536 0x00bcbb0f,
537 0x98010c98,
538 0x0f98020d,
539 0x08004e06,
540 0x00013d7e,
541 0xf001acf0,
542 0x008b04a5,
543 0x0c985030,
544 0x0fc4b604,
545 0x9800bcbb,
546 0x0d98020c,
547 0x080f9803,
548 0x7e02004e,
549 0x7e00013d,
550 0x7e00020a,
551 0xf4000314,
552 0x12f40601,
553/* 0x0739: ctx_xfer_post */
554 0x02277e1a,
555 0x8e0d0f00,
556 0xf0501da8,
557 0xffb201e5,
558 0x00008f7e,
559 0x0003147e,
560/* 0x0750: ctx_xfer_done */
561 0x00060a7e,
562 0x000000f8,
563 0x00000000,
564 0x00000000, 564 0x00000000,
565 0x00000000, 565 0x00000000,
566 0x00000000, 566 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 9f5dfc85147a..1f81069edc58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -34,6 +34,7 @@
34#include <engine/fifo.h> 34#include <engine/fifo.h>
35 35
36#include <nvif/class.h> 36#include <nvif/class.h>
37#include <nvif/cl9097.h>
37#include <nvif/unpack.h> 38#include <nvif/unpack.h>
38 39
39/******************************************************************************* 40/*******************************************************************************
@@ -139,6 +140,12 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
139/******************************************************************************* 140/*******************************************************************************
140 * Graphics object classes 141 * Graphics object classes
141 ******************************************************************************/ 142 ******************************************************************************/
143#define gf100_gr_object(p) container_of((p), struct gf100_gr_object, object)
144
145struct gf100_gr_object {
146 struct nvkm_object object;
147 struct gf100_gr_chan *chan;
148};
142 149
143static int 150static int
144gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) 151gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
@@ -147,9 +154,9 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
147 union { 154 union {
148 struct fermi_a_zbc_color_v0 v0; 155 struct fermi_a_zbc_color_v0 v0;
149 } *args = data; 156 } *args = data;
150 int ret; 157 int ret = -ENOSYS;
151 158
152 if (nvif_unpack(args->v0, 0, 0, false)) { 159 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
153 switch (args->v0.format) { 160 switch (args->v0.format) {
154 case FERMI_A_ZBC_COLOR_V0_FMT_ZERO: 161 case FERMI_A_ZBC_COLOR_V0_FMT_ZERO:
155 case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE: 162 case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE:
@@ -193,9 +200,9 @@ gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
193 union { 200 union {
194 struct fermi_a_zbc_depth_v0 v0; 201 struct fermi_a_zbc_depth_v0 v0;
195 } *args = data; 202 } *args = data;
196 int ret; 203 int ret = -ENOSYS;
197 204
198 if (nvif_unpack(args->v0, 0, 0, false)) { 205 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
199 switch (args->v0.format) { 206 switch (args->v0.format) {
200 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32: 207 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
201 ret = gf100_gr_zbc_depth_get(gr, args->v0.format, 208 ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
@@ -213,6 +220,7 @@ gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
213static int 220static int
214gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 221gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
215{ 222{
223 nvif_ioctl(object, "fermi mthd %08x\n", mthd);
216 switch (mthd) { 224 switch (mthd) {
217 case FERMI_A_ZBC_COLOR: 225 case FERMI_A_ZBC_COLOR:
218 return gf100_fermi_mthd_zbc_color(object, data, size); 226 return gf100_fermi_mthd_zbc_color(object, data, size);
@@ -256,6 +264,27 @@ gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
256 return false; 264 return false;
257} 265}
258 266
267static const struct nvkm_object_func
268gf100_gr_object_func = {
269};
270
271static int
272gf100_gr_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
273 struct nvkm_object **pobject)
274{
275 struct gf100_gr_chan *chan = gf100_gr_chan(oclass->parent);
276 struct gf100_gr_object *object;
277
278 if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
279 return -ENOMEM;
280 *pobject = &object->object;
281
282 nvkm_object_ctor(oclass->base.func ? oclass->base.func :
283 &gf100_gr_object_func, oclass, &object->object);
284 object->chan = chan;
285 return 0;
286}
287
259static int 288static int
260gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass) 289gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
261{ 290{
@@ -265,6 +294,7 @@ gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
265 while (gr->func->sclass[c].oclass) { 294 while (gr->func->sclass[c].oclass) {
266 if (c++ == index) { 295 if (c++ == index) {
267 *sclass = gr->func->sclass[index]; 296 *sclass = gr->func->sclass[index];
297 sclass->ctor = gf100_gr_object_new;
268 return index; 298 return index;
269 } 299 }
270 } 300 }
@@ -826,7 +856,41 @@ gf100_gr_units(struct nvkm_gr *base)
826 return cfg; 856 return cfg;
827} 857}
828 858
859static const struct nvkm_bitfield gf100_dispatch_error[] = {
860 { 0x00000001, "INJECTED_BUNDLE_ERROR" },
861 { 0x00000002, "CLASS_SUBCH_MISMATCH" },
862 { 0x00000004, "SUBCHSW_DURING_NOTIFY" },
863 {}
864};
865
866static const struct nvkm_bitfield gf100_m2mf_error[] = {
867 { 0x00000001, "PUSH_TOO_MUCH_DATA" },
868 { 0x00000002, "PUSH_NOT_ENOUGH_DATA" },
869 {}
870};
871
872static const struct nvkm_bitfield gf100_unk6_error[] = {
873 { 0x00000001, "TEMP_TOO_SMALL" },
874 {}
875};
876
877static const struct nvkm_bitfield gf100_ccache_error[] = {
878 { 0x00000001, "INTR" },
879 { 0x00000002, "LDCONST_OOB" },
880 {}
881};
882
883static const struct nvkm_bitfield gf100_macro_error[] = {
884 { 0x00000001, "TOO_FEW_PARAMS" },
885 { 0x00000002, "TOO_MANY_PARAMS" },
886 { 0x00000004, "ILLEGAL_OPCODE" },
887 { 0x00000008, "DOUBLE_BRANCH" },
888 { 0x00000010, "WATCHDOG" },
889 {}
890};
891
829static const struct nvkm_bitfield gk104_sked_error[] = { 892static const struct nvkm_bitfield gk104_sked_error[] = {
893 { 0x00000040, "CTA_RESUME" },
830 { 0x00000080, "CONSTANT_BUFFER_SIZE" }, 894 { 0x00000080, "CONSTANT_BUFFER_SIZE" },
831 { 0x00000200, "LOCAL_MEMORY_SIZE_POS" }, 895 { 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
832 { 0x00000400, "LOCAL_MEMORY_SIZE_NEG" }, 896 { 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
@@ -836,6 +900,8 @@ static const struct nvkm_bitfield gk104_sked_error[] = {
836 { 0x00040000, "TOTAL_THREADS" }, 900 { 0x00040000, "TOTAL_THREADS" },
837 { 0x00100000, "PROGRAM_OFFSET" }, 901 { 0x00100000, "PROGRAM_OFFSET" },
838 { 0x00200000, "SHARED_MEMORY_SIZE" }, 902 { 0x00200000, "SHARED_MEMORY_SIZE" },
903 { 0x00800000, "CTA_THREAD_DIMENSION_ZERO" },
904 { 0x01000000, "MEMORY_WINDOW_OVERLAP" },
839 { 0x02000000, "SHARED_CONFIG_TOO_SMALL" }, 905 { 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
840 { 0x04000000, "TOTAL_REGISTER_COUNT" }, 906 { 0x04000000, "TOTAL_REGISTER_COUNT" },
841 {} 907 {}
@@ -1005,12 +1071,16 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
1005{ 1071{
1006 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1072 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1007 struct nvkm_device *device = subdev->device; 1073 struct nvkm_device *device = subdev->device;
1074 char error[128];
1008 u32 trap = nvkm_rd32(device, 0x400108); 1075 u32 trap = nvkm_rd32(device, 0x400108);
1009 int rop, gpc; 1076 int rop, gpc;
1010 1077
1011 if (trap & 0x00000001) { 1078 if (trap & 0x00000001) {
1012 u32 stat = nvkm_rd32(device, 0x404000); 1079 u32 stat = nvkm_rd32(device, 0x404000);
1013 nvkm_error(subdev, "DISPATCH %08x\n", stat); 1080
1081 nvkm_snprintbf(error, sizeof(error), gf100_dispatch_error,
1082 stat & 0x3fffffff);
1083 nvkm_error(subdev, "DISPATCH %08x [%s]\n", stat, error);
1014 nvkm_wr32(device, 0x404000, 0xc0000000); 1084 nvkm_wr32(device, 0x404000, 0xc0000000);
1015 nvkm_wr32(device, 0x400108, 0x00000001); 1085 nvkm_wr32(device, 0x400108, 0x00000001);
1016 trap &= ~0x00000001; 1086 trap &= ~0x00000001;
@@ -1018,7 +1088,11 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
1018 1088
1019 if (trap & 0x00000002) { 1089 if (trap & 0x00000002) {
1020 u32 stat = nvkm_rd32(device, 0x404600); 1090 u32 stat = nvkm_rd32(device, 0x404600);
1021 nvkm_error(subdev, "M2MF %08x\n", stat); 1091
1092 nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
1093 stat & 0x3fffffff);
1094 nvkm_error(subdev, "M2MF %08x [%s]\n", stat, error);
1095
1022 nvkm_wr32(device, 0x404600, 0xc0000000); 1096 nvkm_wr32(device, 0x404600, 0xc0000000);
1023 nvkm_wr32(device, 0x400108, 0x00000002); 1097 nvkm_wr32(device, 0x400108, 0x00000002);
1024 trap &= ~0x00000002; 1098 trap &= ~0x00000002;
@@ -1026,7 +1100,10 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
1026 1100
1027 if (trap & 0x00000008) { 1101 if (trap & 0x00000008) {
1028 u32 stat = nvkm_rd32(device, 0x408030); 1102 u32 stat = nvkm_rd32(device, 0x408030);
1029 nvkm_error(subdev, "CCACHE %08x\n", stat); 1103
1104 nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
1105 stat & 0x3fffffff);
1106 nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
1030 nvkm_wr32(device, 0x408030, 0xc0000000); 1107 nvkm_wr32(device, 0x408030, 0xc0000000);
1031 nvkm_wr32(device, 0x400108, 0x00000008); 1108 nvkm_wr32(device, 0x400108, 0x00000008);
1032 trap &= ~0x00000008; 1109 trap &= ~0x00000008;
@@ -1034,7 +1111,8 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
1034 1111
1035 if (trap & 0x00000010) { 1112 if (trap & 0x00000010) {
1036 u32 stat = nvkm_rd32(device, 0x405840); 1113 u32 stat = nvkm_rd32(device, 0x405840);
1037 nvkm_error(subdev, "SHADER %08x\n", stat); 1114 nvkm_error(subdev, "SHADER %08x, sph: 0x%06x, stage: 0x%02x\n",
1115 stat, stat & 0xffffff, (stat >> 24) & 0x3f);
1038 nvkm_wr32(device, 0x405840, 0xc0000000); 1116 nvkm_wr32(device, 0x405840, 0xc0000000);
1039 nvkm_wr32(device, 0x400108, 0x00000010); 1117 nvkm_wr32(device, 0x400108, 0x00000010);
1040 trap &= ~0x00000010; 1118 trap &= ~0x00000010;
@@ -1042,7 +1120,11 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
1042 1120
1043 if (trap & 0x00000040) { 1121 if (trap & 0x00000040) {
1044 u32 stat = nvkm_rd32(device, 0x40601c); 1122 u32 stat = nvkm_rd32(device, 0x40601c);
1045 nvkm_error(subdev, "UNK6 %08x\n", stat); 1123
1124 nvkm_snprintbf(error, sizeof(error), gf100_unk6_error,
1125 stat & 0x3fffffff);
1126 nvkm_error(subdev, "UNK6 %08x [%s]\n", stat, error);
1127
1046 nvkm_wr32(device, 0x40601c, 0xc0000000); 1128 nvkm_wr32(device, 0x40601c, 0xc0000000);
1047 nvkm_wr32(device, 0x400108, 0x00000040); 1129 nvkm_wr32(device, 0x400108, 0x00000040);
1048 trap &= ~0x00000040; 1130 trap &= ~0x00000040;
@@ -1050,7 +1132,16 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
1050 1132
1051 if (trap & 0x00000080) { 1133 if (trap & 0x00000080) {
1052 u32 stat = nvkm_rd32(device, 0x404490); 1134 u32 stat = nvkm_rd32(device, 0x404490);
1053 nvkm_error(subdev, "MACRO %08x\n", stat); 1135 u32 pc = nvkm_rd32(device, 0x404494);
1136 u32 op = nvkm_rd32(device, 0x40449c);
1137
1138 nvkm_snprintbf(error, sizeof(error), gf100_macro_error,
1139 stat & 0x1fffffff);
1140 nvkm_error(subdev, "MACRO %08x [%s], pc: 0x%03x%s, op: 0x%08x\n",
1141 stat, error, pc & 0x7ff,
1142 (pc & 0x10000000) ? "" : " (invalid)",
1143 op);
1144
1054 nvkm_wr32(device, 0x404490, 0xc0000000); 1145 nvkm_wr32(device, 0x404490, 0xc0000000);
1055 nvkm_wr32(device, 0x400108, 0x00000080); 1146 nvkm_wr32(device, 0x400108, 0x00000080);
1056 trap &= ~0x00000080; 1147 trap &= ~0x00000080;
@@ -1058,10 +1149,9 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
1058 1149
1059 if (trap & 0x00000100) { 1150 if (trap & 0x00000100) {
1060 u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff; 1151 u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;
1061 char sked[128];
1062 1152
1063 nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat); 1153 nvkm_snprintbf(error, sizeof(error), gk104_sked_error, stat);
1064 nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked); 1154 nvkm_error(subdev, "SKED: %08x [%s]\n", stat, error);
1065 1155
1066 if (stat) 1156 if (stat)
1067 nvkm_wr32(device, 0x407020, 0x40000000); 1157 nvkm_wr32(device, 0x407020, 0x40000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2721592d3031..f19fabef8d73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -27,6 +27,8 @@
27#include <core/option.h> 27#include <core/option.h>
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/if0002.h>
31#include <nvif/if0003.h>
30#include <nvif/ioctl.h> 32#include <nvif/ioctl.h>
31#include <nvif/unpack.h> 33#include <nvif/unpack.h>
32 34
@@ -210,10 +212,10 @@ nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
210 } *args = data; 212 } *args = data;
211 struct nvkm_object *object = &dom->object; 213 struct nvkm_object *object = &dom->object;
212 struct nvkm_pm *pm = dom->perfmon->pm; 214 struct nvkm_pm *pm = dom->perfmon->pm;
213 int ret, i; 215 int ret = -ENOSYS, i;
214 216
215 nvif_ioctl(object, "perfdom init size %d\n", size); 217 nvif_ioctl(object, "perfdom init size %d\n", size);
216 if (nvif_unvers(args->none)) { 218 if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
217 nvif_ioctl(object, "perfdom init\n"); 219 nvif_ioctl(object, "perfdom init\n");
218 } else 220 } else
219 return ret; 221 return ret;
@@ -240,10 +242,10 @@ nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
240 } *args = data; 242 } *args = data;
241 struct nvkm_object *object = &dom->object; 243 struct nvkm_object *object = &dom->object;
242 struct nvkm_pm *pm = dom->perfmon->pm; 244 struct nvkm_pm *pm = dom->perfmon->pm;
243 int ret; 245 int ret = -ENOSYS;
244 246
245 nvif_ioctl(object, "perfdom sample size %d\n", size); 247 nvif_ioctl(object, "perfdom sample size %d\n", size);
246 if (nvif_unvers(args->none)) { 248 if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
247 nvif_ioctl(object, "perfdom sample\n"); 249 nvif_ioctl(object, "perfdom sample\n");
248 } else 250 } else
249 return ret; 251 return ret;
@@ -264,10 +266,10 @@ nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
264 } *args = data; 266 } *args = data;
265 struct nvkm_object *object = &dom->object; 267 struct nvkm_object *object = &dom->object;
266 struct nvkm_pm *pm = dom->perfmon->pm; 268 struct nvkm_pm *pm = dom->perfmon->pm;
267 int ret, i; 269 int ret = -ENOSYS, i;
268 270
269 nvif_ioctl(object, "perfdom read size %d\n", size); 271 nvif_ioctl(object, "perfdom read size %d\n", size);
270 if (nvif_unpack(args->v0, 0, 0, false)) { 272 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
271 nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version); 273 nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
272 } else 274 } else
273 return ret; 275 return ret;
@@ -374,10 +376,10 @@ nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
374 struct nvkm_perfctr *ctr[4] = {}; 376 struct nvkm_perfctr *ctr[4] = {};
375 struct nvkm_perfdom *dom; 377 struct nvkm_perfdom *dom;
376 int c, s, m; 378 int c, s, m;
377 int ret; 379 int ret = -ENOSYS;
378 380
379 nvif_ioctl(parent, "create perfdom size %d\n", size); 381 nvif_ioctl(parent, "create perfdom size %d\n", size);
380 if (nvif_unpack(args->v0, 0, 0, false)) { 382 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
381 nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n", 383 nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
382 args->v0.version, args->v0.domain, args->v0.mode); 384 args->v0.version, args->v0.domain, args->v0.mode);
383 } else 385 } else
@@ -439,10 +441,10 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
439 struct nvkm_pm *pm = perfmon->pm; 441 struct nvkm_pm *pm = perfmon->pm;
440 struct nvkm_perfdom *dom; 442 struct nvkm_perfdom *dom;
441 u8 domain_nr; 443 u8 domain_nr;
442 int di, ret; 444 int di, ret = -ENOSYS;
443 445
444 nvif_ioctl(object, "perfmon query domain size %d\n", size); 446 nvif_ioctl(object, "perfmon query domain size %d\n", size);
445 if (nvif_unpack(args->v0, 0, 0, false)) { 447 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
446 nvif_ioctl(object, "perfmon domain vers %d iter %02x\n", 448 nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
447 args->v0.version, args->v0.iter); 449 args->v0.version, args->v0.iter);
448 di = (args->v0.iter & 0xff) - 1; 450 di = (args->v0.iter & 0xff) - 1;
@@ -490,10 +492,10 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
490 struct nvkm_perfsig *sig; 492 struct nvkm_perfsig *sig;
491 const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false); 493 const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
492 const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all); 494 const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
493 int ret, si; 495 int ret = -ENOSYS, si;
494 496
495 nvif_ioctl(object, "perfmon query signal size %d\n", size); 497 nvif_ioctl(object, "perfmon query signal size %d\n", size);
496 if (nvif_unpack(args->v0, 0, 0, false)) { 498 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
497 nvif_ioctl(object, 499 nvif_ioctl(object,
498 "perfmon query signal vers %d dom %d iter %04x\n", 500 "perfmon query signal vers %d dom %d iter %04x\n",
499 args->v0.version, args->v0.domain, args->v0.iter); 501 args->v0.version, args->v0.domain, args->v0.iter);
@@ -543,10 +545,10 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
543 struct nvkm_perfsig *sig; 545 struct nvkm_perfsig *sig;
544 struct nvkm_perfsrc *src; 546 struct nvkm_perfsrc *src;
545 u8 source_nr = 0; 547 u8 source_nr = 0;
546 int si, ret; 548 int si, ret = -ENOSYS;
547 549
548 nvif_ioctl(object, "perfmon query source size %d\n", size); 550 nvif_ioctl(object, "perfmon query source size %d\n", size);
549 if (nvif_unpack(args->v0, 0, 0, false)) { 551 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
550 nvif_ioctl(object, 552 nvif_ioctl(object,
551 "perfmon source vers %d dom %d sig %02x iter %02x\n", 553 "perfmon source vers %d dom %d sig %02x iter %02x\n",
552 args->v0.version, args->v0.domain, args->v0.signal, 554 args->v0.version, args->v0.domain, args->v0.signal,
@@ -612,7 +614,7 @@ nvkm_perfmon_child_get(struct nvkm_object *object, int index,
612 struct nvkm_oclass *oclass) 614 struct nvkm_oclass *oclass)
613{ 615{
614 if (index == 0) { 616 if (index == 0) {
615 oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM; 617 oclass->base.oclass = NVIF_CLASS_PERFDOM;
616 oclass->base.minver = 0; 618 oclass->base.minver = 0;
617 oclass->base.maxver = 0; 619 oclass->base.maxver = 0;
618 oclass->ctor = nvkm_perfmon_child_new; 620 oclass->ctor = nvkm_perfmon_child_new;
@@ -679,7 +681,7 @@ nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
679 681
680static const struct nvkm_device_oclass 682static const struct nvkm_device_oclass
681nvkm_pm_oclass = { 683nvkm_pm_oclass = {
682 .base.oclass = NVIF_IOCTL_NEW_V0_PERFMON, 684 .base.oclass = NVIF_CLASS_PERFMON,
683 .base.minver = -1, 685 .base.minver = -1,
684 .base.maxver = -1, 686 .base.maxver = -1,
685 .ctor = nvkm_pm_oclass_new, 687 .ctor = nvkm_pm_oclass_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
index d082f4f73a80..f28967065639 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
@@ -53,9 +53,9 @@ nvkm_sw_chan_event_ctor(struct nvkm_object *object, void *data, u32 size,
53 union { 53 union {
54 struct nvif_notify_uevent_req none; 54 struct nvif_notify_uevent_req none;
55 } *req = data; 55 } *req = data;
56 int ret; 56 int ret = -ENOSYS;
57 57
58 if (nvif_unvers(req->none)) { 58 if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
59 notify->size = sizeof(struct nvif_notify_uevent_rep); 59 notify->size = sizeof(struct nvif_notify_uevent_rep);
60 notify->types = 1; 60 notify->types = 1;
61 notify->index = 0; 61 notify->index = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index b01ef7eca906..ea8f4247b628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -28,8 +28,8 @@
28#include <engine/disp.h> 28#include <engine/disp.h>
29#include <engine/fifo.h> 29#include <engine/fifo.h>
30 30
31#include <nvif/class.h>
31#include <nvif/event.h> 32#include <nvif/event.h>
32#include <nvif/ioctl.h>
33 33
34/******************************************************************************* 34/*******************************************************************************
35 * software context 35 * software context
@@ -143,7 +143,7 @@ static const struct nvkm_sw_func
143gf100_sw = { 143gf100_sw = {
144 .chan_new = gf100_sw_chan_new, 144 .chan_new = gf100_sw_chan_new,
145 .sclass = { 145 .sclass = {
146 { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_GF100 } }, 146 { nvkm_nvsw_new, { -1, -1, NVIF_CLASS_SW_GF100 } },
147 {} 147 {}
148 } 148 }
149}; 149};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 445217ffa791..b6675fe1b0ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -27,6 +27,7 @@
27#include "nvsw.h" 27#include "nvsw.h"
28 28
29#include <nvif/class.h> 29#include <nvif/class.h>
30#include <nvif/if0004.h>
30#include <nvif/ioctl.h> 31#include <nvif/ioctl.h>
31#include <nvif/unpack.h> 32#include <nvif/unpack.h>
32 33
@@ -46,9 +47,9 @@ nv04_nvsw_mthd_get_ref(struct nvkm_nvsw *nvsw, void *data, u32 size)
46 union { 47 union {
47 struct nv04_nvsw_get_ref_v0 v0; 48 struct nv04_nvsw_get_ref_v0 v0;
48 } *args = data; 49 } *args = data;
49 int ret; 50 int ret = -ENOSYS;
50 51
51 if (nvif_unpack(args->v0, 0, 0, false)) { 52 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
52 args->v0.ref = atomic_read(&chan->ref); 53 args->v0.ref = atomic_read(&chan->ref);
53 } 54 }
54 55
@@ -126,7 +127,7 @@ static const struct nvkm_sw_func
126nv04_sw = { 127nv04_sw = {
127 .chan_new = nv04_sw_chan_new, 128 .chan_new = nv04_sw_chan_new,
128 .sclass = { 129 .sclass = {
129 { nv04_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV04 } }, 130 { nv04_nvsw_new, { -1, -1, NVIF_CLASS_SW_NV04 } },
130 {} 131 {}
131 } 132 }
132}; 133};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index adf70d92b244..09d22fcd194c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -25,7 +25,7 @@
25#include "chan.h" 25#include "chan.h"
26#include "nvsw.h" 26#include "nvsw.h"
27 27
28#include <nvif/ioctl.h> 28#include <nvif/class.h>
29 29
30/******************************************************************************* 30/*******************************************************************************
31 * software context 31 * software context
@@ -56,7 +56,7 @@ static const struct nvkm_sw_func
56nv10_sw = { 56nv10_sw = {
57 .chan_new = nv10_sw_chan_new, 57 .chan_new = nv10_sw_chan_new,
58 .sclass = { 58 .sclass = {
59 { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV10 } }, 59 { nvkm_nvsw_new, { -1, -1, NVIF_CLASS_SW_NV10 } },
60 {} 60 {}
61 } 61 }
62}; 62};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index a381196af69d..01573d187f2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -28,8 +28,8 @@
28#include <engine/fifo/chan.h> 28#include <engine/fifo/chan.h>
29#include <subdev/bar.h> 29#include <subdev/bar.h>
30 30
31#include <nvif/class.h>
31#include <nvif/event.h> 32#include <nvif/event.h>
32#include <nvif/ioctl.h>
33 33
34/******************************************************************************* 34/*******************************************************************************
35 * software context 35 * software context
@@ -136,7 +136,7 @@ static const struct nvkm_sw_func
136nv50_sw = { 136nv50_sw = {
137 .chan_new = nv50_sw_chan_new, 137 .chan_new = nv50_sw_chan_new,
138 .sclass = { 138 .sclass = {
139 { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV50 } }, 139 { nvkm_nvsw_new, { -1, -1, NVIF_CLASS_SW_NV50 } },
140 {} 140 {}
141 } 141 }
142}; 142};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
index 66cf986b9572..33dd03fff3c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
@@ -24,7 +24,7 @@
24#include "nvsw.h" 24#include "nvsw.h"
25#include "chan.h" 25#include "chan.h"
26 26
27#include <nvif/class.h> 27#include <nvif/if0004.h>
28 28
29static int 29static int
30nvkm_nvsw_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size) 30nvkm_nvsw_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
@@ -41,7 +41,7 @@ nvkm_nvsw_ntfy_(struct nvkm_object *object, u32 mthd,
41{ 41{
42 struct nvkm_nvsw *nvsw = nvkm_nvsw(object); 42 struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
43 switch (mthd) { 43 switch (mthd) {
44 case NVSW_NTFY_UEVENT: 44 case NV04_NVSW_NTFY_UEVENT:
45 *pevent = &nvsw->chan->event; 45 *pevent = &nvsw->chan->event;
46 return 0; 46 return 0;
47 default: 47 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index 8304b806f2a6..a8d5d67feeaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -143,16 +143,19 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
143 switch (outp->type) { 143 switch (outp->type) {
144 case DCB_OUTPUT_DP: 144 case DCB_OUTPUT_DP:
145 switch (conf & 0x00e00000) { 145 switch (conf & 0x00e00000) {
146 case 0x00000000: 146 case 0x00000000: /* 1.62 */
147 outp->dpconf.link_bw = 0x06; 147 outp->dpconf.link_bw = 0x06;
148 break; 148 break;
149 case 0x00200000: 149 case 0x00200000: /* 2.7 */
150 outp->dpconf.link_bw = 0x0a; 150 outp->dpconf.link_bw = 0x0a;
151 break; 151 break;
152 case 0x00400000: 152 case 0x00400000: /* 5.4 */
153 default:
154 outp->dpconf.link_bw = 0x14; 153 outp->dpconf.link_bw = 0x14;
155 break; 154 break;
155 case 0x00600000: /* 8.1 */
156 default:
157 outp->dpconf.link_bw = 0x1e;
158 break;
156 } 159 }
157 160
158 switch ((conf & 0x0f000000) >> 24) { 161 switch ((conf & 0x0f000000) >> 24) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 43006db6fd58..80fed7e78dcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
83 fan->type = NVBIOS_THERM_FAN_UNK; 83 fan->type = NVBIOS_THERM_FAN_UNK;
84 } 84 }
85 85
86 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
86 fan->min_duty = nvbios_rd08(bios, data + 0x02); 87 fan->min_duty = nvbios_rd08(bios, data + 0x02);
87 fan->max_duty = nvbios_rd08(bios, data + 0x03); 88 fan->max_duty = nvbios_rd08(bios, data + 0x03);
88 89
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index aa7e33b42b30..636bfb665bb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -24,6 +24,7 @@
24#include <subdev/bios.h> 24#include <subdev/bios.h>
25#include <subdev/bios/bit.h> 25#include <subdev/bios/bit.h>
26#include <subdev/bios/perf.h> 26#include <subdev/bios/perf.h>
27#include <subdev/pci.h>
27 28
28u16 29u16
29nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, 30nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
@@ -145,6 +146,21 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
145 break; 146 break;
146 case 0x40: 147 case 0x40:
147 info->voltage = nvbios_rd08(bios, perf + 0x02); 148 info->voltage = nvbios_rd08(bios, perf + 0x02);
149 switch (nvbios_rd08(bios, perf + 0xb) & 0x3) {
150 case 0:
151 info->pcie_speed = NVKM_PCIE_SPEED_5_0;
152 break;
153 case 3:
154 case 1:
155 info->pcie_speed = NVKM_PCIE_SPEED_2_5;
156 break;
157 case 2:
158 info->pcie_speed = NVKM_PCIE_SPEED_8_0;
159 break;
160 default:
161 break;
162 }
163 info->pcie_width = 0xff;
148 break; 164 break;
149 default: 165 default:
150 return 0x0000; 166 return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index dc8682c91cc7..889cce2eb727 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -176,6 +176,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
176{ 176{
177 struct nvkm_subdev *subdev = &clk->subdev; 177 struct nvkm_subdev *subdev = &clk->subdev;
178 struct nvkm_ram *ram = subdev->device->fb->ram; 178 struct nvkm_ram *ram = subdev->device->fb->ram;
179 struct nvkm_pci *pci = subdev->device->pci;
179 struct nvkm_pstate *pstate; 180 struct nvkm_pstate *pstate;
180 int ret, idx = 0; 181 int ret, idx = 0;
181 182
@@ -187,6 +188,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
187 nvkm_debug(subdev, "setting performance state %d\n", pstatei); 188 nvkm_debug(subdev, "setting performance state %d\n", pstatei);
188 clk->pstate = pstatei; 189 clk->pstate = pstatei;
189 190
191 nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
192
190 if (ram && ram->func->calc) { 193 if (ram && ram->func->calc) {
191 int khz = pstate->base.domain[nv_clk_src_mem]; 194 int khz = pstate->base.domain[nv_clk_src_mem];
192 do { 195 do {
@@ -330,6 +333,8 @@ nvkm_pstate_new(struct nvkm_clk *clk, int idx)
330 333
331 pstate->pstate = perfE.pstate; 334 pstate->pstate = perfE.pstate;
332 pstate->fanspeed = perfE.fanspeed; 335 pstate->fanspeed = perfE.fanspeed;
336 pstate->pcie_speed = perfE.pcie_speed;
337 pstate->pcie_width = perfE.pcie_width;
333 cstate->voltage = perfE.voltage; 338 cstate->voltage = perfE.voltage;
334 cstate->domain[nv_clk_src_core] = perfE.core; 339 cstate->domain[nv_clk_src_core] = perfE.core;
335 cstate->domain[nv_clk_src_shader] = perfE.shader; 340 cstate->domain[nv_clk_src_shader] = perfE.shader;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index a52b7e7fce41..78c449b417b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -188,7 +188,7 @@ gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
188 return read_clk(clk, 0x08); 188 return read_clk(clk, 0x08);
189 case nv_clk_src_copy: 189 case nv_clk_src_copy:
190 return read_clk(clk, 0x09); 190 return read_clk(clk, 0x09);
191 case nv_clk_src_daemon: 191 case nv_clk_src_pmu:
192 return read_clk(clk, 0x0c); 192 return read_clk(clk, 0x0c);
193 case nv_clk_src_vdec: 193 case nv_clk_src_vdec:
194 return read_clk(clk, 0x0e); 194 return read_clk(clk, 0x0e);
@@ -325,7 +325,7 @@ gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
325 (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) || 325 (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
326 (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) || 326 (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
327 (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) || 327 (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
328 (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) || 328 (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
329 (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec))) 329 (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
330 return ret; 330 return ret;
331 331
@@ -447,7 +447,7 @@ gf100_clk = {
447 { nv_clk_src_rop , 0x04 }, 447 { nv_clk_src_rop , 0x04 },
448 { nv_clk_src_mem , 0x05, 0, "memory", 1000 }, 448 { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
449 { nv_clk_src_vdec , 0x06 }, 449 { nv_clk_src_vdec , 0x06 },
450 { nv_clk_src_daemon , 0x0a }, 450 { nv_clk_src_pmu , 0x0a },
451 { nv_clk_src_hubk07 , 0x0b }, 451 { nv_clk_src_hubk07 , 0x0b },
452 { nv_clk_src_max } 452 { nv_clk_src_max }
453 } 453 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 396f7e4dad0a..975c401bccab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -209,7 +209,7 @@ gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
209 return read_clk(clk, 0x07); 209 return read_clk(clk, 0x07);
210 case nv_clk_src_hubk01: 210 case nv_clk_src_hubk01:
211 return read_clk(clk, 0x08); 211 return read_clk(clk, 0x08);
212 case nv_clk_src_daemon: 212 case nv_clk_src_pmu:
213 return read_clk(clk, 0x0c); 213 return read_clk(clk, 0x0c);
214 case nv_clk_src_vdec: 214 case nv_clk_src_vdec:
215 return read_clk(clk, 0x0e); 215 return read_clk(clk, 0x0e);
@@ -346,7 +346,7 @@ gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
346 (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) || 346 (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
347 (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) || 347 (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
348 (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) || 348 (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
349 (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) || 349 (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
350 (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec))) 350 (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
351 return ret; 351 return ret;
352 352
@@ -492,7 +492,7 @@ gk104_clk = {
492 { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE }, 492 { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
493 { nv_clk_src_hubk01 , 0x05 }, 493 { nv_clk_src_hubk01 , 0x05 },
494 { nv_clk_src_vdec , 0x06 }, 494 { nv_clk_src_vdec , 0x06 },
495 { nv_clk_src_daemon , 0x07 }, 495 { nv_clk_src_pmu , 0x07 },
496 { nv_clk_src_max } 496 { nv_clk_src_max }
497 } 497 }
498}; 498};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index c233e3f653ce..056702ef69aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -158,7 +158,7 @@ gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
158 return read_clk(clk, 0x20, false); 158 return read_clk(clk, 0x20, false);
159 case nv_clk_src_vdec: 159 case nv_clk_src_vdec:
160 return read_clk(clk, 0x21, false); 160 return read_clk(clk, 0x21, false);
161 case nv_clk_src_daemon: 161 case nv_clk_src_pmu:
162 return read_clk(clk, 0x25, false); 162 return read_clk(clk, 0x25, false);
163 case nv_clk_src_host: 163 case nv_clk_src_host:
164 hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28; 164 hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index f5edfadb5b46..1b5fb02eab2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -132,7 +132,7 @@ static const struct nvkm_enum vm_engine[] = {
132 { 0x0000000b, "PCOUNTER" }, 132 { 0x0000000b, "PCOUNTER" },
133 { 0x0000000c, "SEMAPHORE_BG" }, 133 { 0x0000000c, "SEMAPHORE_BG" },
134 { 0x0000000d, "PCE0" }, 134 { 0x0000000d, "PCE0" },
135 { 0x0000000e, "PDAEMON" }, 135 { 0x0000000e, "PMU" },
136 {} 136 {}
137}; 137};
138 138
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 9df45030ff9f..1fa3ade468ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -216,11 +216,11 @@ r1373f4_fini(struct gk104_ramfuc *fuc)
216 ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16)); 216 ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
217 ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000); 217 ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
218 if (ram->mode == 2) { 218 if (ram->mode == 2) {
219 ram_mask(fuc, 0x1373f4, 0x00000003, 0x000000002); 219 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000002);
220 ram_mask(fuc, 0x1373f4, 0x00001100, 0x000000000); 220 ram_mask(fuc, 0x1373f4, 0x00001100, 0x00000000);
221 } else { 221 } else {
222 ram_mask(fuc, 0x1373f4, 0x00000003, 0x000000001); 222 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
223 ram_mask(fuc, 0x1373f4, 0x00010000, 0x000000000); 223 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
224 } 224 }
225 ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4); 225 ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
226} 226}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
index de888fa62b3e..7e77a7466992 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -2,3 +2,4 @@ nvkm-y += nvkm/subdev/ibus/gf100.o
2nvkm-y += nvkm/subdev/ibus/gf117.o 2nvkm-y += nvkm/subdev/ibus/gf117.o
3nvkm-y += nvkm/subdev/ibus/gk104.o 3nvkm-y += nvkm/subdev/ibus/gk104.o
4nvkm-y += nvkm/subdev/ibus/gk20a.o 4nvkm-y += nvkm/subdev/ibus/gk20a.o
5nvkm-y += nvkm/subdev/ibus/gm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index ba33609f643c..b5cee3f89aaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -21,7 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/ibus.h> 24#include "priv.h"
25 25
26static void 26static void
27gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i) 27gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -56,7 +56,7 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
56 nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000); 56 nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
57} 57}
58 58
59static void 59void
60gk104_ibus_intr(struct nvkm_subdev *ibus) 60gk104_ibus_intr(struct nvkm_subdev *ibus)
61{ 61{
62 struct nvkm_device *device = ibus->device; 62 struct nvkm_device *device = ibus->device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
new file mode 100644
index 000000000000..b3839dc254ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26static const struct nvkm_subdev_func
27gm204_ibus = {
28 .intr = gk104_ibus_intr,
29};
30
31int
32gm204_ibus_new(struct nvkm_device *device, int index,
33 struct nvkm_subdev **pibus)
34{
35 struct nvkm_subdev *ibus;
36 if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
37 return -ENOMEM;
38 nvkm_subdev_ctor(&gm204_ibus, device, index, 0, ibus);
39 return 0;
40}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
index 48e1b6365ce6..01caf798cf31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
@@ -4,4 +4,5 @@
4#include <subdev/ibus.h> 4#include <subdev/ibus.h>
5 5
6void gf100_ibus_intr(struct nvkm_subdev *); 6void gf100_ibus_intr(struct nvkm_subdev *);
7void gk104_ibus_intr(struct nvkm_subdev *);
7#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 14107b5b7811..4c20fec64d96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -56,7 +56,6 @@ struct gk20a_instobj {
56 56
57 /* CPU mapping */ 57 /* CPU mapping */
58 u32 *vaddr; 58 u32 *vaddr;
59 struct list_head vaddr_node;
60}; 59};
61#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory) 60#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
62 61
@@ -66,7 +65,6 @@ struct gk20a_instobj {
66struct gk20a_instobj_dma { 65struct gk20a_instobj_dma {
67 struct gk20a_instobj base; 66 struct gk20a_instobj base;
68 67
69 u32 *cpuaddr;
70 dma_addr_t handle; 68 dma_addr_t handle;
71 struct nvkm_mm_node r; 69 struct nvkm_mm_node r;
72}; 70};
@@ -79,6 +77,11 @@ struct gk20a_instobj_dma {
79struct gk20a_instobj_iommu { 77struct gk20a_instobj_iommu {
80 struct gk20a_instobj base; 78 struct gk20a_instobj base;
81 79
80 /* to link into gk20a_instmem::vaddr_lru */
81 struct list_head vaddr_node;
82 /* how many clients are using vaddr? */
83 u32 use_cpt;
84
82 /* will point to the higher half of pages */ 85 /* will point to the higher half of pages */
83 dma_addr_t *dma_addrs; 86 dma_addr_t *dma_addrs;
84 /* array of base.mem->size pages (+ dma_addr_ts) */ 87 /* array of base.mem->size pages (+ dma_addr_ts) */
@@ -107,8 +110,6 @@ struct gk20a_instmem {
107 110
108 /* Only used by DMA API */ 111 /* Only used by DMA API */
109 struct dma_attrs attrs; 112 struct dma_attrs attrs;
110
111 void __iomem * (*cpu_map)(struct nvkm_memory *);
112}; 113};
113#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) 114#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
114 115
@@ -130,70 +131,58 @@ gk20a_instobj_size(struct nvkm_memory *memory)
130 return (u64)gk20a_instobj(memory)->mem.size << 12; 131 return (u64)gk20a_instobj(memory)->mem.size << 12;
131} 132}
132 133
133static void __iomem * 134/*
134gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory) 135 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
135{ 136 */
136#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 137static void
137 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); 138gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
138 struct device *dev = node->base.imem->base.subdev.device->dev;
139 int npages = nvkm_memory_size(memory) >> 12;
140 struct page *pages[npages];
141 int i;
142
143 /* we shouldn't see a gk20a on anything but arm/arm64 anyways */
144 /* phys_to_page does not exist on all platforms... */
145 pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
146 for (i = 1; i < npages; i++)
147 pages[i] = pages[0] + i;
148
149 return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
150#else
151 BUG();
152 return NULL;
153#endif
154}
155
156static void __iomem *
157gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
158{ 139{
159 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); 140 struct gk20a_instmem *imem = obj->base.imem;
160 int npages = nvkm_memory_size(memory) >> 12; 141 /* there should not be any user left... */
161 142 WARN_ON(obj->use_cpt);
162 return vmap(node->pages, npages, VM_MAP, 143 list_del(&obj->vaddr_node);
163 pgprot_writecombine(PAGE_KERNEL)); 144 vunmap(obj->base.vaddr);
145 obj->base.vaddr = NULL;
146 imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
147 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
148 imem->vaddr_max);
164} 149}
165 150
166/* 151/*
167 * Must be called while holding gk20a_instmem_lock 152 * Must be called while holding gk20a_instmem::lock
168 */ 153 */
169static void 154static void
170gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size) 155gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
171{ 156{
172 while (imem->vaddr_use + size > imem->vaddr_max) { 157 while (imem->vaddr_use + size > imem->vaddr_max) {
173 struct gk20a_instobj *obj;
174
175 /* no candidate that can be unmapped, abort... */ 158 /* no candidate that can be unmapped, abort... */
176 if (list_empty(&imem->vaddr_lru)) 159 if (list_empty(&imem->vaddr_lru))
177 break; 160 break;
178 161
179 obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj, 162 gk20a_instobj_iommu_recycle_vaddr(
180 vaddr_node); 163 list_first_entry(&imem->vaddr_lru,
181 list_del(&obj->vaddr_node); 164 struct gk20a_instobj_iommu, vaddr_node));
182 vunmap(obj->vaddr);
183 obj->vaddr = NULL;
184 imem->vaddr_use -= nvkm_memory_size(&obj->memory);
185 nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
186 imem->vaddr_use, imem->vaddr_max);
187
188 } 165 }
189} 166}
190 167
191static void __iomem * 168static void __iomem *
192gk20a_instobj_acquire(struct nvkm_memory *memory) 169gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
193{ 170{
194 struct gk20a_instobj *node = gk20a_instobj(memory); 171 struct gk20a_instobj *node = gk20a_instobj(memory);
195 struct gk20a_instmem *imem = node->imem; 172 struct gk20a_instmem *imem = node->imem;
196 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; 173 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
174
175 nvkm_ltc_flush(ltc);
176
177 return node->vaddr;
178}
179
180static void __iomem *
181gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
182{
183 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
184 struct gk20a_instmem *imem = node->base.imem;
185 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
197 const u64 size = nvkm_memory_size(memory); 186 const u64 size = nvkm_memory_size(memory);
198 unsigned long flags; 187 unsigned long flags;
199 188
@@ -201,19 +190,21 @@ gk20a_instobj_acquire(struct nvkm_memory *memory)
201 190
202 spin_lock_irqsave(&imem->lock, flags); 191 spin_lock_irqsave(&imem->lock, flags);
203 192
204 if (node->vaddr) { 193 if (node->base.vaddr) {
205 /* remove us from the LRU list since we cannot be unmapped */ 194 if (!node->use_cpt) {
206 list_del(&node->vaddr_node); 195 /* remove from LRU list since mapping in use again */
207 196 list_del(&node->vaddr_node);
197 }
208 goto out; 198 goto out;
209 } 199 }
210 200
211 /* try to free some address space if we reached the limit */ 201 /* try to free some address space if we reached the limit */
212 gk20a_instmem_vaddr_gc(imem, size); 202 gk20a_instmem_vaddr_gc(imem, size);
213 203
214 node->vaddr = imem->cpu_map(memory); 204 /* map the pages */
215 205 node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
216 if (!node->vaddr) { 206 pgprot_writecombine(PAGE_KERNEL));
207 if (!node->base.vaddr) {
217 nvkm_error(&imem->base.subdev, "cannot map instobj - " 208 nvkm_error(&imem->base.subdev, "cannot map instobj - "
218 "this is not going to end well...\n"); 209 "this is not going to end well...\n");
219 goto out; 210 goto out;
@@ -224,24 +215,41 @@ gk20a_instobj_acquire(struct nvkm_memory *memory)
224 imem->vaddr_use, imem->vaddr_max); 215 imem->vaddr_use, imem->vaddr_max);
225 216
226out: 217out:
218 node->use_cpt++;
227 spin_unlock_irqrestore(&imem->lock, flags); 219 spin_unlock_irqrestore(&imem->lock, flags);
228 220
229 return node->vaddr; 221 return node->base.vaddr;
230} 222}
231 223
232static void 224static void
233gk20a_instobj_release(struct nvkm_memory *memory) 225gk20a_instobj_release_dma(struct nvkm_memory *memory)
234{ 226{
235 struct gk20a_instobj *node = gk20a_instobj(memory); 227 struct gk20a_instobj *node = gk20a_instobj(memory);
236 struct gk20a_instmem *imem = node->imem; 228 struct gk20a_instmem *imem = node->imem;
237 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; 229 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
230
231 nvkm_ltc_invalidate(ltc);
232}
233
234static void
235gk20a_instobj_release_iommu(struct nvkm_memory *memory)
236{
237 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
238 struct gk20a_instmem *imem = node->base.imem;
239 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
238 unsigned long flags; 240 unsigned long flags;
239 241
240 spin_lock_irqsave(&imem->lock, flags); 242 spin_lock_irqsave(&imem->lock, flags);
241 243
242 /* add ourselves to the LRU list so our CPU mapping can be freed */ 244 /* we should at least have one user to release... */
243 list_add_tail(&node->vaddr_node, &imem->vaddr_lru); 245 if (WARN_ON(node->use_cpt == 0))
246 goto out;
247
248 /* add unused objs to the LRU list to recycle their mapping */
249 if (--node->use_cpt == 0)
250 list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
244 251
252out:
245 spin_unlock_irqrestore(&imem->lock, flags); 253 spin_unlock_irqrestore(&imem->lock, flags);
246 254
247 wmb(); 255 wmb();
@@ -272,37 +280,6 @@ gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
272 nvkm_vm_map_at(vma, offset, &node->mem); 280 nvkm_vm_map_at(vma, offset, &node->mem);
273} 281}
274 282
275/*
276 * Clear the CPU mapping of an instobj if it exists
277 */
278static void
279gk20a_instobj_dtor(struct gk20a_instobj *node)
280{
281 struct gk20a_instmem *imem = node->imem;
282 struct gk20a_instobj *obj;
283 unsigned long flags;
284
285 spin_lock_irqsave(&imem->lock, flags);
286
287 if (!node->vaddr)
288 goto out;
289
290 list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
291 if (obj == node) {
292 list_del(&obj->vaddr_node);
293 break;
294 }
295 }
296 vunmap(node->vaddr);
297 node->vaddr = NULL;
298 imem->vaddr_use -= nvkm_memory_size(&node->memory);
299 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
300 imem->vaddr_use, imem->vaddr_max);
301
302out:
303 spin_unlock_irqrestore(&imem->lock, flags);
304}
305
306static void * 283static void *
307gk20a_instobj_dtor_dma(struct nvkm_memory *memory) 284gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
308{ 285{
@@ -310,12 +287,10 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
310 struct gk20a_instmem *imem = node->base.imem; 287 struct gk20a_instmem *imem = node->base.imem;
311 struct device *dev = imem->base.subdev.device->dev; 288 struct device *dev = imem->base.subdev.device->dev;
312 289
313 gk20a_instobj_dtor(&node->base); 290 if (unlikely(!node->base.vaddr))
314
315 if (unlikely(!node->cpuaddr))
316 goto out; 291 goto out;
317 292
318 dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr, 293 dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
319 node->handle, &imem->attrs); 294 node->handle, &imem->attrs);
320 295
321out: 296out:
@@ -329,13 +304,20 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
329 struct gk20a_instmem *imem = node->base.imem; 304 struct gk20a_instmem *imem = node->base.imem;
330 struct device *dev = imem->base.subdev.device->dev; 305 struct device *dev = imem->base.subdev.device->dev;
331 struct nvkm_mm_node *r; 306 struct nvkm_mm_node *r;
307 unsigned long flags;
332 int i; 308 int i;
333 309
334 gk20a_instobj_dtor(&node->base);
335
336 if (unlikely(list_empty(&node->base.mem.regions))) 310 if (unlikely(list_empty(&node->base.mem.regions)))
337 goto out; 311 goto out;
338 312
313 spin_lock_irqsave(&imem->lock, flags);
314
315 /* vaddr has already been recycled */
316 if (node->base.vaddr)
317 gk20a_instobj_iommu_recycle_vaddr(node);
318
319 spin_unlock_irqrestore(&imem->lock, flags);
320
339 r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node, 321 r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
340 rl_entry); 322 rl_entry);
341 323
@@ -366,8 +348,8 @@ gk20a_instobj_func_dma = {
366 .target = gk20a_instobj_target, 348 .target = gk20a_instobj_target,
367 .addr = gk20a_instobj_addr, 349 .addr = gk20a_instobj_addr,
368 .size = gk20a_instobj_size, 350 .size = gk20a_instobj_size,
369 .acquire = gk20a_instobj_acquire, 351 .acquire = gk20a_instobj_acquire_dma,
370 .release = gk20a_instobj_release, 352 .release = gk20a_instobj_release_dma,
371 .rd32 = gk20a_instobj_rd32, 353 .rd32 = gk20a_instobj_rd32,
372 .wr32 = gk20a_instobj_wr32, 354 .wr32 = gk20a_instobj_wr32,
373 .map = gk20a_instobj_map, 355 .map = gk20a_instobj_map,
@@ -379,8 +361,8 @@ gk20a_instobj_func_iommu = {
379 .target = gk20a_instobj_target, 361 .target = gk20a_instobj_target,
380 .addr = gk20a_instobj_addr, 362 .addr = gk20a_instobj_addr,
381 .size = gk20a_instobj_size, 363 .size = gk20a_instobj_size,
382 .acquire = gk20a_instobj_acquire, 364 .acquire = gk20a_instobj_acquire_iommu,
383 .release = gk20a_instobj_release, 365 .release = gk20a_instobj_release_iommu,
384 .rd32 = gk20a_instobj_rd32, 366 .rd32 = gk20a_instobj_rd32,
385 .wr32 = gk20a_instobj_wr32, 367 .wr32 = gk20a_instobj_wr32,
386 .map = gk20a_instobj_map, 368 .map = gk20a_instobj_map,
@@ -400,10 +382,10 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
400 382
401 nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); 383 nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
402 384
403 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, 385 node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
404 &node->handle, GFP_KERNEL, 386 &node->handle, GFP_KERNEL,
405 &imem->attrs); 387 &imem->attrs);
406 if (!node->cpuaddr) { 388 if (!node->base.vaddr) {
407 nvkm_error(subdev, "cannot allocate DMA memory\n"); 389 nvkm_error(subdev, "cannot allocate DMA memory\n");
408 return -ENOMEM; 390 return -ENOMEM;
409 } 391 }
@@ -609,18 +591,14 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
609 imem->mm = &tdev->iommu.mm; 591 imem->mm = &tdev->iommu.mm;
610 imem->domain = tdev->iommu.domain; 592 imem->domain = tdev->iommu.domain;
611 imem->iommu_pgshift = tdev->iommu.pgshift; 593 imem->iommu_pgshift = tdev->iommu.pgshift;
612 imem->cpu_map = gk20a_instobj_cpu_map_iommu;
613 imem->iommu_bit = tdev->func->iommu_bit; 594 imem->iommu_bit = tdev->func->iommu_bit;
614 595
615 nvkm_info(&imem->base.subdev, "using IOMMU\n"); 596 nvkm_info(&imem->base.subdev, "using IOMMU\n");
616 } else { 597 } else {
617 init_dma_attrs(&imem->attrs); 598 init_dma_attrs(&imem->attrs);
618 /* We will access the memory through our own mapping */
619 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs); 599 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
620 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs); 600 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
621 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs); 601 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
622 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
623 imem->cpu_map = gk20a_instobj_cpu_map_dma;
624 602
625 nvkm_info(&imem->base.subdev, "using DMA API\n"); 603 nvkm_info(&imem->base.subdev, "using DMA API\n");
626 } 604 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index e5df3d865f0c..f8108df3cb38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -2,3 +2,4 @@ nvkm-y += nvkm/subdev/ltc/base.o
2nvkm-y += nvkm/subdev/ltc/gf100.o 2nvkm-y += nvkm/subdev/ltc/gf100.o
3nvkm-y += nvkm/subdev/ltc/gk104.o 3nvkm-y += nvkm/subdev/ltc/gk104.o
4nvkm-y += nvkm/subdev/ltc/gm107.o 4nvkm-y += nvkm/subdev/ltc/gm107.o
5nvkm-y += nvkm/subdev/ltc/gm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 3043bbfd7384..2af1f9e100fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -26,16 +26,16 @@
26#include <subdev/fb.h> 26#include <subdev/fb.h>
27#include <subdev/timer.h> 27#include <subdev/timer.h>
28 28
29static void 29void
30gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit) 30gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
31{ 31{
32 struct nvkm_device *device = ltc->subdev.device; 32 struct nvkm_device *device = ltc->subdev.device;
33 nvkm_wr32(device, 0x17e270, start); 33 nvkm_wr32(device, 0x17e270, start);
34 nvkm_wr32(device, 0x17e274, limit); 34 nvkm_wr32(device, 0x17e274, limit);
35 nvkm_wr32(device, 0x17e26c, 0x00000004); 35 nvkm_mask(device, 0x17e26c, 0x00000000, 0x00000004);
36} 36}
37 37
38static void 38void
39gm107_ltc_cbc_wait(struct nvkm_ltc *ltc) 39gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
40{ 40{
41 struct nvkm_device *device = ltc->subdev.device; 41 struct nvkm_device *device = ltc->subdev.device;
@@ -51,7 +51,7 @@ gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
51 } 51 }
52} 52}
53 53
54static void 54void
55gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4]) 55gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
56{ 56{
57 struct nvkm_device *device = ltc->subdev.device; 57 struct nvkm_device *device = ltc->subdev.device;
@@ -62,7 +62,7 @@ gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
62 nvkm_wr32(device, 0x17e348, color[3]); 62 nvkm_wr32(device, 0x17e348, color[3]);
63} 63}
64 64
65static void 65void
66gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) 66gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
67{ 67{
68 struct nvkm_device *device = ltc->subdev.device; 68 struct nvkm_device *device = ltc->subdev.device;
@@ -84,7 +84,7 @@ gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
84 } 84 }
85} 85}
86 86
87static void 87void
88gm107_ltc_intr(struct nvkm_ltc *ltc) 88gm107_ltc_intr(struct nvkm_ltc *ltc)
89{ 89{
90 struct nvkm_device *device = ltc->subdev.device; 90 struct nvkm_device *device = ltc->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c
new file mode 100644
index 000000000000..5ad6fb9d022d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26#include <subdev/fb.h>
27#include <subdev/timer.h>
28
29static int
30gm204_ltc_oneinit(struct nvkm_ltc *ltc)
31{
32 struct nvkm_device *device = ltc->subdev.device;
33
34 ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
35 ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
36
37 return gf100_ltc_oneinit_tag_ram(ltc);
38}
39static void
40gm204_ltc_init(struct nvkm_ltc *ltc)
41{
42 nvkm_wr32(ltc->subdev.device, 0x17e278, ltc->tag_base);
43}
44
45static const struct nvkm_ltc_func
46gm204_ltc = {
47 .oneinit = gm204_ltc_oneinit,
48 .init = gm204_ltc_init,
49 .intr = gm107_ltc_intr, /*XXX: not validated */
50 .cbc_clear = gm107_ltc_cbc_clear,
51 .cbc_wait = gm107_ltc_cbc_wait,
52 .zbc = 16,
53 .zbc_clear_color = gm107_ltc_zbc_clear_color,
54 .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
55 .invalidate = gf100_ltc_invalidate,
56 .flush = gf100_ltc_flush,
57};
58
59int
60gm204_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
61{
62 return nvkm_ltc_new_(&gm204_ltc, device, index, pltc);
63}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 4e3755b82769..6d81c695ed0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -31,4 +31,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
31void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); 31void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
32void gf100_ltc_invalidate(struct nvkm_ltc *); 32void gf100_ltc_invalidate(struct nvkm_ltc *);
33void gf100_ltc_flush(struct nvkm_ltc *); 33void gf100_ltc_flush(struct nvkm_ltc *);
34
35void gm107_ltc_intr(struct nvkm_ltc *);
36void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
37void gm107_ltc_cbc_wait(struct nvkm_ltc *);
38void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
39void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
34#endif 40#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 4476ef75acd6..3c2519fdeb81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -1,5 +1,6 @@
1nvkm-y += nvkm/subdev/pci/agp.o 1nvkm-y += nvkm/subdev/pci/agp.o
2nvkm-y += nvkm/subdev/pci/base.o 2nvkm-y += nvkm/subdev/pci/base.o
3nvkm-y += nvkm/subdev/pci/pcie.o
3nvkm-y += nvkm/subdev/pci/nv04.o 4nvkm-y += nvkm/subdev/pci/nv04.o
4nvkm-y += nvkm/subdev/pci/nv40.o 5nvkm-y += nvkm/subdev/pci/nv40.o
5nvkm-y += nvkm/subdev/pci/nv46.o 6nvkm-y += nvkm/subdev/pci/nv46.o
@@ -7,3 +8,5 @@ nvkm-y += nvkm/subdev/pci/nv4c.o
7nvkm-y += nvkm/subdev/pci/g84.o 8nvkm-y += nvkm/subdev/pci/g84.o
8nvkm-y += nvkm/subdev/pci/g94.o 9nvkm-y += nvkm/subdev/pci/g94.o
9nvkm-y += nvkm/subdev/pci/gf100.o 10nvkm-y += nvkm/subdev/pci/gf100.o
11nvkm-y += nvkm/subdev/pci/gf106.o
12nvkm-y += nvkm/subdev/pci/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index d671dcfaff3c..65057c8310a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -107,6 +107,15 @@ nvkm_pci_preinit(struct nvkm_subdev *subdev)
107} 107}
108 108
109static int 109static int
110nvkm_pci_oneinit(struct nvkm_subdev *subdev)
111{
112 struct nvkm_pci *pci = nvkm_pci(subdev);
113 if (pci_is_pcie(pci->pdev))
114 return nvkm_pcie_oneinit(pci);
115 return 0;
116}
117
118static int
110nvkm_pci_init(struct nvkm_subdev *subdev) 119nvkm_pci_init(struct nvkm_subdev *subdev)
111{ 120{
112 struct nvkm_pci *pci = nvkm_pci(subdev); 121 struct nvkm_pci *pci = nvkm_pci(subdev);
@@ -117,6 +126,8 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
117 ret = nvkm_agp_init(pci); 126 ret = nvkm_agp_init(pci);
118 if (ret) 127 if (ret)
119 return ret; 128 return ret;
129 } else if (pci_is_pcie(pci->pdev)) {
130 nvkm_pcie_init(pci);
120 } 131 }
121 132
122 if (pci->func->init) 133 if (pci->func->init)
@@ -143,6 +154,7 @@ nvkm_pci_dtor(struct nvkm_subdev *subdev)
143static const struct nvkm_subdev_func 154static const struct nvkm_subdev_func
144nvkm_pci_func = { 155nvkm_pci_func = {
145 .dtor = nvkm_pci_dtor, 156 .dtor = nvkm_pci_dtor,
157 .oneinit = nvkm_pci_oneinit,
146 .preinit = nvkm_pci_preinit, 158 .preinit = nvkm_pci_preinit,
147 .init = nvkm_pci_init, 159 .init = nvkm_pci_init,
148 .fini = nvkm_pci_fini, 160 .fini = nvkm_pci_fini,
@@ -160,6 +172,8 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
160 pci->func = func; 172 pci->func = func;
161 pci->pdev = device->func->pci(device)->pdev; 173 pci->pdev = device->func->pci(device)->pdev;
162 pci->irq = -1; 174 pci->irq = -1;
175 pci->pcie.speed = -1;
176 pci->pcie.width = -1;
163 177
164 if (device->type == NVKM_DEVICE_AGP) 178 if (device->type == NVKM_DEVICE_AGP)
165 nvkm_agp_ctor(pci); 179 nvkm_agp_ctor(pci);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 3faa6bfb895b..62438d892f42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -25,6 +25,80 @@
25 25
26#include <core/pci.h> 26#include <core/pci.h>
27 27
28static int
29g84_pcie_version_supported(struct nvkm_pci *pci)
30{
31 /* g84 and g86 report wrong information about what they support */
32 return 1;
33}
34
35int
36g84_pcie_version(struct nvkm_pci *pci)
37{
38 struct nvkm_device *device = pci->subdev.device;
39 return (nvkm_rd32(device, 0x00154c) & 0x1) + 1;
40}
41
42void
43g84_pcie_set_version(struct nvkm_pci *pci, u8 ver)
44{
45 struct nvkm_device *device = pci->subdev.device;
46 nvkm_mask(device, 0x00154c, 0x1, (ver >= 2 ? 0x1 : 0x0));
47}
48
49static void
50g84_pcie_set_cap_speed(struct nvkm_pci *pci, bool full_speed)
51{
52 struct nvkm_device *device = pci->subdev.device;
53 nvkm_mask(device, 0x00154c, 0x80, full_speed ? 0x80 : 0x0);
54}
55
56enum nvkm_pcie_speed
57g84_pcie_cur_speed(struct nvkm_pci *pci)
58{
59 u32 reg_v = nvkm_pci_rd32(pci, 0x88) & 0x30000;
60 switch (reg_v) {
61 case 0x30000:
62 return NVKM_PCIE_SPEED_8_0;
63 case 0x20000:
64 return NVKM_PCIE_SPEED_5_0;
65 case 0x10000:
66 default:
67 return NVKM_PCIE_SPEED_2_5;
68 }
69}
70
71enum nvkm_pcie_speed
72g84_pcie_max_speed(struct nvkm_pci *pci)
73{
74 u32 reg_v = nvkm_pci_rd32(pci, 0x460) & 0x3300;
75 if (reg_v == 0x2200)
76 return NVKM_PCIE_SPEED_5_0;
77 return NVKM_PCIE_SPEED_2_5;
78}
79
80void
81g84_pcie_set_link_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
82{
83 u32 mask_value;
84
85 if (speed == NVKM_PCIE_SPEED_5_0)
86 mask_value = 0x20;
87 else
88 mask_value = 0x10;
89
90 nvkm_pci_mask(pci, 0x460, 0x30, mask_value);
91 nvkm_pci_mask(pci, 0x460, 0x1, 0x1);
92}
93
94int
95g84_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
96{
97 g84_pcie_set_cap_speed(pci, speed == NVKM_PCIE_SPEED_5_0);
98 g84_pcie_set_link_speed(pci, speed);
99 return 0;
100}
101
28void 102void
29g84_pci_init(struct nvkm_pci *pci) 103g84_pci_init(struct nvkm_pci *pci)
30{ 104{
@@ -48,6 +122,14 @@ g84_pci_init(struct nvkm_pci *pci)
48 nvkm_pci_mask(pci, 0x041c, 0x00000060, 0x00000000); 122 nvkm_pci_mask(pci, 0x041c, 0x00000060, 0x00000000);
49} 123}
50 124
125int
126g84_pcie_init(struct nvkm_pci *pci)
127{
128 bool full_speed = g84_pcie_cur_speed(pci) == NVKM_PCIE_SPEED_5_0;
129 g84_pcie_set_cap_speed(pci, full_speed);
130 return 0;
131}
132
51static const struct nvkm_pci_func 133static const struct nvkm_pci_func
52g84_pci_func = { 134g84_pci_func = {
53 .init = g84_pci_init, 135 .init = g84_pci_init,
@@ -55,6 +137,16 @@ g84_pci_func = {
55 .wr08 = nv40_pci_wr08, 137 .wr08 = nv40_pci_wr08,
56 .wr32 = nv40_pci_wr32, 138 .wr32 = nv40_pci_wr32,
57 .msi_rearm = nv46_pci_msi_rearm, 139 .msi_rearm = nv46_pci_msi_rearm,
140
141 .pcie.init = g84_pcie_init,
142 .pcie.set_link = g84_pcie_set_link,
143
144 .pcie.max_speed = g84_pcie_max_speed,
145 .pcie.cur_speed = g84_pcie_cur_speed,
146
147 .pcie.set_version = g84_pcie_set_version,
148 .pcie.version = g84_pcie_version,
149 .pcie.version_supported = g84_pcie_version_supported,
58}; 150};
59 151
60int 152int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index cd311ee311cc..43444123bc04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -23,6 +23,14 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26int
27g94_pcie_version_supported(struct nvkm_pci *pci)
28{
29 if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200)
30 return 2;
31 return 1;
32}
33
26static const struct nvkm_pci_func 34static const struct nvkm_pci_func
27g94_pci_func = { 35g94_pci_func = {
28 .init = g84_pci_init, 36 .init = g84_pci_init,
@@ -30,6 +38,16 @@ g94_pci_func = {
30 .wr08 = nv40_pci_wr08, 38 .wr08 = nv40_pci_wr08,
31 .wr32 = nv40_pci_wr32, 39 .wr32 = nv40_pci_wr32,
32 .msi_rearm = nv40_pci_msi_rearm, 40 .msi_rearm = nv40_pci_msi_rearm,
41
42 .pcie.init = g84_pcie_init,
43 .pcie.set_link = g84_pcie_set_link,
44
45 .pcie.max_speed = g84_pcie_max_speed,
46 .pcie.cur_speed = g84_pcie_cur_speed,
47
48 .pcie.set_version = g84_pcie_set_version,
49 .pcie.version = g84_pcie_version,
50 .pcie.version_supported = g94_pcie_version_supported,
33}; 51};
34 52
35int 53int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 25e1ae70867f..e30ea676baf6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -29,6 +29,53 @@ gf100_pci_msi_rearm(struct nvkm_pci *pci)
29 nvkm_pci_wr08(pci, 0x0704, 0xff); 29 nvkm_pci_wr08(pci, 0x0704, 0xff);
30} 30}
31 31
32void
33gf100_pcie_set_version(struct nvkm_pci *pci, u8 ver)
34{
35 struct nvkm_device *device = pci->subdev.device;
36 nvkm_mask(device, 0x02241c, 0x1, ver > 1 ? 1 : 0);
37}
38
39int
40gf100_pcie_version(struct nvkm_pci *pci)
41{
42 struct nvkm_device *device = pci->subdev.device;
43 return (nvkm_rd32(device, 0x02241c) & 0x1) + 1;
44}
45
46void
47gf100_pcie_set_cap_speed(struct nvkm_pci *pci, bool full_speed)
48{
49 struct nvkm_device *device = pci->subdev.device;
50 nvkm_mask(device, 0x02241c, 0x80, full_speed ? 0x80 : 0x0);
51}
52
53int
54gf100_pcie_cap_speed(struct nvkm_pci *pci)
55{
56 struct nvkm_device *device = pci->subdev.device;
57 u8 punits_pci_cap_speed = nvkm_rd32(device, 0x02241c) & 0x80;
58 if (punits_pci_cap_speed == 0x80)
59 return 1;
60 return 0;
61}
62
63int
64gf100_pcie_init(struct nvkm_pci *pci)
65{
66 bool full_speed = g84_pcie_cur_speed(pci) == NVKM_PCIE_SPEED_5_0;
67 gf100_pcie_set_cap_speed(pci, full_speed);
68 return 0;
69}
70
71int
72gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
73{
74 gf100_pcie_set_cap_speed(pci, speed == NVKM_PCIE_SPEED_5_0);
75 g84_pcie_set_link_speed(pci, speed);
76 return 0;
77}
78
32static const struct nvkm_pci_func 79static const struct nvkm_pci_func
33gf100_pci_func = { 80gf100_pci_func = {
34 .init = g84_pci_init, 81 .init = g84_pci_init,
@@ -36,6 +83,16 @@ gf100_pci_func = {
36 .wr08 = nv40_pci_wr08, 83 .wr08 = nv40_pci_wr08,
37 .wr32 = nv40_pci_wr32, 84 .wr32 = nv40_pci_wr32,
38 .msi_rearm = gf100_pci_msi_rearm, 85 .msi_rearm = gf100_pci_msi_rearm,
86
87 .pcie.init = gf100_pcie_init,
88 .pcie.set_link = gf100_pcie_set_link,
89
90 .pcie.max_speed = g84_pcie_max_speed,
91 .pcie.cur_speed = g84_pcie_cur_speed,
92
93 .pcie.set_version = gf100_pcie_set_version,
94 .pcie.version = gf100_pcie_version,
95 .pcie.version_supported = g94_pcie_version_supported,
39}; 96};
40 97
41int 98int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
new file mode 100644
index 000000000000..c3b798c5c6dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2015 Karol Herbst <nouveau@karolherbst.de>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Karol Herbst <nouveau@karolherbst.de>
23 */
24#include "priv.h"
25
26static const struct nvkm_pci_func
27gf106_pci_func = {
28 .init = g84_pci_init,
29 .rd32 = nv40_pci_rd32,
30 .wr08 = nv40_pci_wr08,
31 .wr32 = nv40_pci_wr32,
32 .msi_rearm = nv40_pci_msi_rearm,
33
34 .pcie.init = gf100_pcie_init,
35 .pcie.set_link = gf100_pcie_set_link,
36
37 .pcie.max_speed = g84_pcie_max_speed,
38 .pcie.cur_speed = g84_pcie_cur_speed,
39
40 .pcie.set_version = gf100_pcie_set_version,
41 .pcie.version = gf100_pcie_version,
42 .pcie.version_supported = g94_pcie_version_supported,
43};
44
45int
46gf106_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
47{
48 return nvkm_pci_new_(&gf106_pci_func, device, index, ppci);
49}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
new file mode 100644
index 000000000000..e68030507d88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright 2015 Karol Herbst <nouveau@karolherbst.de>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Karol Herbst <nouveau@karolherbst.de>
23 */
24#include "priv.h"
25
26static int
27gk104_pcie_version_supported(struct nvkm_pci *pci)
28{
29 return (nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x4) == 0x4 ? 2 : 1;
30}
31
32static void
33gk104_pcie_set_cap_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
34{
35 struct nvkm_device *device = pci->subdev.device;
36
37 switch (speed) {
38 case NVKM_PCIE_SPEED_2_5:
39 gf100_pcie_set_cap_speed(pci, false);
40 nvkm_mask(device, 0x8c1c0, 0x30000, 0x10000);
41 break;
42 case NVKM_PCIE_SPEED_5_0:
43 gf100_pcie_set_cap_speed(pci, true);
44 nvkm_mask(device, 0x8c1c0, 0x30000, 0x20000);
45 break;
46 case NVKM_PCIE_SPEED_8_0:
47 gf100_pcie_set_cap_speed(pci, true);
48 nvkm_mask(device, 0x8c1c0, 0x30000, 0x30000);
49 break;
50 }
51}
52
53static enum nvkm_pcie_speed
54gk104_pcie_cap_speed(struct nvkm_pci *pci)
55{
56 int speed = gf100_pcie_cap_speed(pci);
57
58 if (speed == 0)
59 return NVKM_PCIE_SPEED_2_5;
60
61 if (speed >= 1) {
62 int speed2 = nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x30000;
63 switch (speed2) {
64 case 0x00000:
65 case 0x10000:
66 return NVKM_PCIE_SPEED_2_5;
67 case 0x20000:
68 return NVKM_PCIE_SPEED_5_0;
69 case 0x30000:
70 return NVKM_PCIE_SPEED_8_0;
71 }
72 }
73
74 return -EINVAL;
75}
76
77static void
78gk104_pcie_set_lnkctl_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
79{
80 u8 reg_v = 0;
81 switch (speed) {
82 case NVKM_PCIE_SPEED_2_5:
83 reg_v = 1;
84 break;
85 case NVKM_PCIE_SPEED_5_0:
86 reg_v = 2;
87 break;
88 case NVKM_PCIE_SPEED_8_0:
89 reg_v = 3;
90 break;
91 }
92 nvkm_pci_mask(pci, 0xa8, 0x3, reg_v);
93}
94
95static enum nvkm_pcie_speed
96gk104_pcie_lnkctl_speed(struct nvkm_pci *pci)
97{
98 u8 reg_v = nvkm_pci_rd32(pci, 0xa8) & 0x3;
99 switch (reg_v) {
100 case 0:
101 case 1:
102 return NVKM_PCIE_SPEED_2_5;
103 case 2:
104 return NVKM_PCIE_SPEED_5_0;
105 case 3:
106 return NVKM_PCIE_SPEED_8_0;
107 }
108 return -1;
109}
110
111static enum nvkm_pcie_speed
112gk104_pcie_max_speed(struct nvkm_pci *pci)
113{
114 u32 max_speed = nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x300000;
115 switch (max_speed) {
116 case 0x000000:
117 return NVKM_PCIE_SPEED_8_0;
118 case 0x100000:
119 return NVKM_PCIE_SPEED_5_0;
120 case 0x200000:
121 return NVKM_PCIE_SPEED_2_5;
122 }
123 return NVKM_PCIE_SPEED_2_5;
124}
125
126static void
127gk104_pcie_set_link_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
128{
129 struct nvkm_device *device = pci->subdev.device;
130 u32 mask_value;
131
132 switch (speed) {
133 case NVKM_PCIE_SPEED_8_0:
134 mask_value = 0x00000;
135 break;
136 case NVKM_PCIE_SPEED_5_0:
137 mask_value = 0x40000;
138 break;
139 case NVKM_PCIE_SPEED_2_5:
140 default:
141 mask_value = 0x80000;
142 break;
143 }
144
145 nvkm_mask(device, 0x8c040, 0xc0000, mask_value);
146 nvkm_mask(device, 0x8c040, 0x1, 0x1);
147}
148
149static int
150gk104_pcie_init(struct nvkm_pci * pci)
151{
152 enum nvkm_pcie_speed lnkctl_speed, max_speed, cap_speed;
153 struct nvkm_subdev *subdev = &pci->subdev;
154
155 if (gf100_pcie_version(pci) < 2)
156 return 0;
157
158 lnkctl_speed = gk104_pcie_lnkctl_speed(pci);
159 max_speed = gk104_pcie_max_speed(pci);
160 cap_speed = gk104_pcie_cap_speed(pci);
161
162 if (cap_speed != max_speed) {
163 nvkm_trace(subdev, "adjusting cap to max speed\n");
164 gk104_pcie_set_cap_speed(pci, max_speed);
165 cap_speed = gk104_pcie_cap_speed(pci);
166 if (cap_speed != max_speed)
167 nvkm_warn(subdev, "failed to adjust cap speed\n");
168 }
169
170 if (lnkctl_speed != max_speed) {
171 nvkm_debug(subdev, "adjusting lnkctl to max speed\n");
172 gk104_pcie_set_lnkctl_speed(pci, max_speed);
173 lnkctl_speed = gk104_pcie_lnkctl_speed(pci);
174 if (lnkctl_speed != max_speed)
175 nvkm_error(subdev, "failed to adjust lnkctl speed\n");
176 }
177
178 return 0;
179}
180
181static int
182gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
183{
184 struct nvkm_subdev *subdev = &pci->subdev;
185 enum nvkm_pcie_speed lnk_ctl_speed = gk104_pcie_lnkctl_speed(pci);
186 enum nvkm_pcie_speed lnk_cap_speed = gk104_pcie_cap_speed(pci);
187
188 if (speed > lnk_cap_speed) {
189 speed = lnk_cap_speed;
190 nvkm_warn(subdev, "dropping requested speed due too low cap"
191 " speed\n");
192 }
193
194 if (speed > lnk_ctl_speed) {
195 speed = lnk_ctl_speed;
196 nvkm_warn(subdev, "dropping requested speed due too low"
197 " lnkctl speed\n");
198 }
199
200 gk104_pcie_set_link_speed(pci, speed);
201 return 0;
202}
203
204
205static const struct nvkm_pci_func
206gk104_pci_func = {
207 .init = g84_pci_init,
208 .rd32 = nv40_pci_rd32,
209 .wr08 = nv40_pci_wr08,
210 .wr32 = nv40_pci_wr32,
211 .msi_rearm = nv40_pci_msi_rearm,
212
213 .pcie.init = gk104_pcie_init,
214 .pcie.set_link = gk104_pcie_set_link,
215
216 .pcie.max_speed = gk104_pcie_max_speed,
217 .pcie.cur_speed = g84_pcie_cur_speed,
218
219 .pcie.set_version = gf100_pcie_set_version,
220 .pcie.version = gf100_pcie_version,
221 .pcie.version_supported = gk104_pcie_version_supported,
222};
223
224int
225gk104_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
226{
227 return nvkm_pci_new_(&gk104_pci_func, device, index, ppci);
228}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/pcie.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/pcie.c
new file mode 100644
index 000000000000..d71e5db5028a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/pcie.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright 2015 Karol Herbst <nouveau@karolherbst.de>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Karol Herbst <git@karolherbst.de>
23 */
24#include "priv.h"
25
/* Human-readable names for enum nvkm_pcie_speed, indexed by the enum
 * value; used in trace/debug messages only.  Declared fully const: the
 * table points at string literals and is never written.
 */
static const char * const nvkm_pcie_speeds[] = {
	"2.5GT/s",
	"5.0GT/s",
	"8.0GT/s",
};
31
32static enum nvkm_pcie_speed
33nvkm_pcie_speed(enum pci_bus_speed speed)
34{
35 switch (speed) {
36 case PCIE_SPEED_2_5GT:
37 return NVKM_PCIE_SPEED_2_5;
38 case PCIE_SPEED_5_0GT:
39 return NVKM_PCIE_SPEED_5_0;
40 case PCIE_SPEED_8_0GT:
41 return NVKM_PCIE_SPEED_8_0;
42 default:
43 /* XXX 0x16 is 8_0, assume 0x17 will be 16_0 for now */
44 if (speed == 0x17)
45 return NVKM_PCIE_SPEED_8_0;
46 return -1;
47 }
48}
49
50static int
51nvkm_pcie_get_version(struct nvkm_pci *pci)
52{
53 if (!pci->func->pcie.version)
54 return -ENOSYS;
55
56 return pci->func->pcie.version(pci);
57}
58
59static int
60nvkm_pcie_get_max_version(struct nvkm_pci *pci)
61{
62 if (!pci->func->pcie.version_supported)
63 return -ENOSYS;
64
65 return pci->func->pcie.version_supported(pci);
66}
67
68static int
69nvkm_pcie_set_version(struct nvkm_pci *pci, int version)
70{
71 if (!pci->func->pcie.set_version)
72 return -ENOSYS;
73
74 nvkm_trace(&pci->subdev, "set to version %i\n", version);
75 pci->func->pcie.set_version(pci, version);
76 return nvkm_pcie_get_version(pci);
77}
78
79int
80nvkm_pcie_oneinit(struct nvkm_pci *pci)
81{
82 if (pci->func->pcie.max_speed)
83 nvkm_debug(&pci->subdev, "pcie max speed: %s\n",
84 nvkm_pcie_speeds[pci->func->pcie.max_speed(pci)]);
85 return 0;
86}
87
88int
89nvkm_pcie_init(struct nvkm_pci *pci)
90{
91 struct nvkm_subdev *subdev = &pci->subdev;
92 int ret;
93
94 /* raise pcie version first */
95 ret = nvkm_pcie_get_version(pci);
96 if (ret > 0) {
97 int max_version = nvkm_pcie_get_max_version(pci);
98 if (max_version > 0 && max_version > ret)
99 ret = nvkm_pcie_set_version(pci, max_version);
100
101 if (ret < max_version)
102 nvkm_error(subdev, "couldn't raise version: %i\n", ret);
103 }
104
105 if (pci->func->pcie.init)
106 pci->func->pcie.init(pci);
107
108 if (pci->pcie.speed != -1)
109 nvkm_pcie_set_link(pci, pci->pcie.speed, pci->pcie.width);
110
111 return 0;
112}
113
114int
115nvkm_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
116{
117 struct nvkm_subdev *subdev = &pci->subdev;
118 enum nvkm_pcie_speed cur_speed, max_speed;
119 struct pci_bus *pbus;
120 int ret;
121
122 if (!pci || !pci_is_pcie(pci->pdev))
123 return 0;
124 pbus = pci->pdev->bus;
125
126 if (!pci->func->pcie.set_link)
127 return -ENOSYS;
128
129 nvkm_trace(subdev, "requested %s\n", nvkm_pcie_speeds[speed]);
130
131 if (pci->func->pcie.version(pci) < 2) {
132 nvkm_error(subdev, "setting link failed due to low version\n");
133 return -ENODEV;
134 }
135
136 cur_speed = pci->func->pcie.cur_speed(pci);
137 max_speed = min(nvkm_pcie_speed(pbus->max_bus_speed),
138 pci->func->pcie.max_speed(pci));
139
140 nvkm_trace(subdev, "current speed: %s\n", nvkm_pcie_speeds[cur_speed]);
141
142 if (speed > max_speed) {
143 nvkm_debug(subdev, "%s not supported by bus or card, dropping"
144 "requested speed to %s", nvkm_pcie_speeds[speed],
145 nvkm_pcie_speeds[max_speed]);
146 speed = max_speed;
147 }
148
149 pci->pcie.speed = speed;
150 pci->pcie.width = width;
151
152 if (speed == cur_speed) {
153 nvkm_debug(subdev, "requested matches current speed\n");
154 return speed;
155 }
156
157 nvkm_debug(subdev, "set link to %s x%i\n",
158 nvkm_pcie_speeds[speed], width);
159
160 ret = pci->func->pcie.set_link(pci, speed, width);
161 if (ret < 0)
162 nvkm_error(subdev, "setting link failed: %i\n", ret);
163
164 return ret;
165}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index cf46d38d0b0a..23de3180aae5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -12,6 +12,18 @@ struct nvkm_pci_func {
12 void (*wr08)(struct nvkm_pci *, u16 addr, u8 data); 12 void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
13 void (*wr32)(struct nvkm_pci *, u16 addr, u32 data); 13 void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
14 void (*msi_rearm)(struct nvkm_pci *); 14 void (*msi_rearm)(struct nvkm_pci *);
15
16 struct {
17 int (*init)(struct nvkm_pci *);
18 int (*set_link)(struct nvkm_pci *, enum nvkm_pcie_speed, u8);
19
20 enum nvkm_pcie_speed (*max_speed)(struct nvkm_pci *);
21 enum nvkm_pcie_speed (*cur_speed)(struct nvkm_pci *);
22
23 void (*set_version)(struct nvkm_pci *, u8);
24 int (*version)(struct nvkm_pci *);
25 int (*version_supported)(struct nvkm_pci *);
26 } pcie;
15}; 27};
16 28
17u32 nv40_pci_rd32(struct nvkm_pci *, u16); 29u32 nv40_pci_rd32(struct nvkm_pci *, u16);
@@ -22,4 +34,25 @@ void nv40_pci_msi_rearm(struct nvkm_pci *);
22void nv46_pci_msi_rearm(struct nvkm_pci *); 34void nv46_pci_msi_rearm(struct nvkm_pci *);
23 35
24void g84_pci_init(struct nvkm_pci *pci); 36void g84_pci_init(struct nvkm_pci *pci);
37
38/* pcie functions */
39void g84_pcie_set_version(struct nvkm_pci *, u8);
40int g84_pcie_version(struct nvkm_pci *);
41void g84_pcie_set_link_speed(struct nvkm_pci *, enum nvkm_pcie_speed);
42enum nvkm_pcie_speed g84_pcie_cur_speed(struct nvkm_pci *);
43enum nvkm_pcie_speed g84_pcie_max_speed(struct nvkm_pci *);
44int g84_pcie_init(struct nvkm_pci *);
45int g84_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8);
46
47int g94_pcie_version_supported(struct nvkm_pci *);
48
49void gf100_pcie_set_version(struct nvkm_pci *, u8);
50int gf100_pcie_version(struct nvkm_pci *);
51void gf100_pcie_set_cap_speed(struct nvkm_pci *, bool);
52int gf100_pcie_cap_speed(struct nvkm_pci *);
53int gf100_pcie_init(struct nvkm_pci *);
54int gf100_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8);
55
56int nvkm_pcie_oneinit(struct nvkm_pci *);
57int nvkm_pcie_init(struct nvkm_pci *);
25#endif 58#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 302557c52d03..770294457274 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -24,8 +24,8 @@ uint32_t gf100_pmu_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x00000512, 27 0x00000507,
28 0x000004af, 28 0x000004a4,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t gf100_pmu_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000075e, 49 0x00000753,
50 0x00000750, 50 0x00000745,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t gf100_pmu_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000762, 71 0x00000757,
72 0x00000760, 72 0x00000755,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t gf100_pmu_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000b92, 93 0x00000b87,
94 0x00000a35, 94 0x00000a2a,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t gf100_pmu_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000bbb, 115 0x00000bb0,
116 0x00000b94, 116 0x00000b89,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t gf100_pmu_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000bc7, 137 0x00000bbc,
138 0x00000bc5, 138 0x00000bba,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -229,26 +229,26 @@ uint32_t gf100_pmu_data[] = {
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00000001, 230 0x00000001,
231 0x00000000, 231 0x00000000,
232 0x00000551, 232 0x00000546,
233/* 0x037c: memx_func_next */ 233/* 0x037c: memx_func_next */
234 0x00000002, 234 0x00000002,
235 0x00000000, 235 0x00000000,
236 0x000005db, 236 0x000005d0,
237 0x00000003, 237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x000006a5, 239 0x0000069a,
240 0x00040004, 240 0x00040004,
241 0x00000000, 241 0x00000000,
242 0x000006c1, 242 0x000006b6,
243 0x00010005, 243 0x00010005,
244 0x00000000, 244 0x00000000,
245 0x000006de, 245 0x000006d3,
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x00000663, 248 0x00000658,
249 0x00000007, 249 0x00000007,
250 0x00000000, 250 0x00000000,
251 0x000006e9, 251 0x000006de,
252/* 0x03c4: memx_func_tail */ 252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */ 253/* 0x03c4: memx_ts_start */
254 0x00000000, 254 0x00000000,
@@ -917,7 +917,7 @@ uint32_t gf100_pmu_data[] = {
917}; 917};
918 918
919uint32_t gf100_pmu_code[] = { 919uint32_t gf100_pmu_code[] = {
920 0x039e0ef5, 920 0x03930ef5,
921/* 0x0004: rd32 */ 921/* 0x0004: rd32 */
922 0x07a007f1, 922 0x07a007f1,
923 0xd00604b6, 923 0xd00604b6,
@@ -987,7 +987,7 @@ uint32_t gf100_pmu_code[] = {
987 0xbb9a0a98, 987 0xbb9a0a98,
988 0x1cf4029a, 988 0x1cf4029a,
989 0x01d7f00f, 989 0x01d7f00f,
990 0x02dd21f5, 990 0x02d221f5,
991 0x0ef494bd, 991 0x0ef494bd,
992/* 0x00f9: intr_watchdog_next_time */ 992/* 0x00f9: intr_watchdog_next_time */
993 0x9b0a9815, 993 0x9b0a9815,
@@ -1039,7 +1039,7 @@ uint32_t gf100_pmu_code[] = {
1039 0x48e7f1c0, 1039 0x48e7f1c0,
1040 0x53e3f14f, 1040 0x53e3f14f,
1041 0x00d7f054, 1041 0x00d7f054,
1042 0x034221f5, 1042 0x033721f5,
1043 0x07f1c0fc, 1043 0x07f1c0fc,
1044 0x04b604c0, 1044 0x04b604c0,
1045 0x000cd006, 1045 0x000cd006,
@@ -1048,760 +1048,758 @@ uint32_t gf100_pmu_code[] = {
1048 0x04b60688, 1048 0x04b60688,
1049 0x0009d006, 1049 0x0009d006,
1050/* 0x01ca: intr_skip_subintr */ 1050/* 0x01ca: intr_skip_subintr */
1051 0x89c404bd, 1051 0x97f104bd,
1052 0x070bf420, 1052 0x90bd00e0,
1053 0xffbfa4f1, 1053 0xf00489fd,
1054/* 0x01d4: intr_skip_pause */ 1054 0x04b60407,
1055 0xf44089c4, 1055 0x0008d006,
1056 0xa4f1070b, 1056 0x80fc04bd,
1057/* 0x01de: intr_skip_user0 */ 1057 0xfc0088fe,
1058 0x07f0ffbf, 1058 0xfce0fcf0,
1059 0x0604b604, 1059 0xfcc0fcd0,
1060 0xbd0008d0, 1060 0xfca0fcb0,
1061 0xfe80fc04, 1061 0xfc80fc90,
1062 0xf0fc0088, 1062 0x0032f400,
1063 0xd0fce0fc, 1063/* 0x01fa: ticks_from_ns */
1064 0xb0fcc0fc, 1064 0xc0f901f8,
1065 0x90fca0fc,
1066 0x00fc80fc,
1067 0xf80032f4,
1068/* 0x0205: ticks_from_ns */
1069 0xf9c0f901,
1070 0xcbd7f1b0,
1071 0x00d3f000,
1072 0x041321f5,
1073 0x03e8ccec,
1074 0xf400b4b0,
1075 0xeeec120b,
1076 0xd7f103e8,
1077 0xd3f000cb,
1078 0x1321f500,
1079/* 0x022d: ticks_from_ns_quit */
1080 0x02ceb904,
1081 0xc0fcb0fc,
1082/* 0x0236: ticks_from_us */
1083 0xc0f900f8,
1084 0xd7f1b0f9, 1065 0xd7f1b0f9,
1085 0xd3f000cb, 1066 0xd3f000cb,
1086 0x1321f500, 1067 0x0821f500,
1087 0x02ceb904, 1068 0xe8ccec04,
1088 0xf400b4b0, 1069 0x00b4b003,
1089 0xe4bd050b, 1070 0xec120bf4,
1090/* 0x0250: ticks_from_us_quit */ 1071 0xf103e8ee,
1091 0xc0fcb0fc, 1072 0xf000cbd7,
1092/* 0x0256: ticks_to_us */ 1073 0x21f500d3,
1093 0xd7f100f8, 1074/* 0x0222: ticks_from_ns_quit */
1094 0xd3f000cb, 1075 0xceb90408,
1095 0xecedff00, 1076 0xfcb0fc02,
1096/* 0x0262: timer */ 1077/* 0x022b: ticks_from_us */
1097 0x90f900f8, 1078 0xf900f8c0,
1098 0x32f480f9, 1079 0xf1b0f9c0,
1099 0x03f89810, 1080 0xf000cbd7,
1100 0xf40086b0, 1081 0x21f500d3,
1101 0x84bd651c, 1082 0xceb90408,
1102 0xb63807f0, 1083 0x00b4b002,
1103 0x08d00604, 1084 0xbd050bf4,
1104 0xf004bd00, 1085/* 0x0245: ticks_from_us_quit */
1105 0x84b63487, 1086 0xfcb0fce4,
1106 0x0088cf06, 1087/* 0x024b: ticks_to_us */
1107 0xbb9a0998, 1088 0xf100f8c0,
1108 0xe9bb0298, 1089 0xf000cbd7,
1109 0x03fe8000, 1090 0xedff00d3,
1110 0xb60887f0, 1091/* 0x0257: timer */
1111 0x88cf0684, 1092 0xf900f8ec,
1112 0x0284f000, 1093 0xf480f990,
1113 0xf0261bf4, 1094 0xf8981032,
1114 0x84b63487, 1095 0x0086b003,
1115 0x0088cf06, 1096 0xbd651cf4,
1116 0xf406e0b8, 1097 0x3807f084,
1117 0xe8b8090b,
1118 0x111cf406,
1119/* 0x02b8: timer_reset */
1120 0xb63407f0,
1121 0x0ed00604,
1122 0x8004bd00,
1123/* 0x02c6: timer_enable */
1124 0x87f09a0e,
1125 0x3807f001,
1126 0xd00604b6, 1098 0xd00604b6,
1127 0x04bd0008, 1099 0x04bd0008,
1128/* 0x02d4: timer_done */ 1100 0xb63487f0,
1129 0xfc1031f4, 1101 0x88cf0684,
1130 0xf890fc80, 1102 0x9a099800,
1131/* 0x02dd: send_proc */ 1103 0xbb0298bb,
1132 0xf980f900, 1104 0xfe8000e9,
1133 0x05e89890, 1105 0x0887f003,
1134 0xf004e998, 1106 0xcf0684b6,
1135 0x89b80486, 1107 0x84f00088,
1136 0x2a0bf406, 1108 0x261bf402,
1137 0x940398c4, 1109 0xb63487f0,
1138 0x80b60488, 1110 0x88cf0684,
1139 0x008ebb18, 1111 0x06e0b800,
1140 0x8000fa98, 1112 0xb8090bf4,
1141 0x8d80008a, 1113 0x1cf406e8,
1142 0x028c8001, 1114/* 0x02ad: timer_reset */
1143 0xb6038b80, 1115 0x3407f011,
1144 0x94f00190,
1145 0x04e98007,
1146/* 0x0317: send_done */
1147 0xfc0231f4,
1148 0xf880fc90,
1149/* 0x031d: find */
1150 0xf080f900,
1151 0x31f45887,
1152/* 0x0325: find_loop */
1153 0x008a9801,
1154 0xf406aeb8,
1155 0x80b6100b,
1156 0x6886b158,
1157 0xf01bf402,
1158/* 0x033b: find_done */
1159 0xb90132f4,
1160 0x80fc028e,
1161/* 0x0342: send */
1162 0x21f500f8,
1163 0x01f4031d,
1164/* 0x034b: recv */
1165 0xf900f897,
1166 0x9880f990,
1167 0xe99805e8,
1168 0x0132f404,
1169 0xf40689b8,
1170 0x89c43d0b,
1171 0x0180b603,
1172 0x800784f0,
1173 0xea9805e8,
1174 0xfef0f902,
1175 0xf0f9018f,
1176 0x9402efb9,
1177 0xe9bb0499,
1178 0x18e0b600,
1179 0x9803eb98,
1180 0xed9802ec,
1181 0x00ee9801,
1182 0xf0fca5f9,
1183 0xf400f8fe,
1184 0xf0fc0131,
1185/* 0x0398: recv_done */
1186 0x90fc80fc,
1187/* 0x039e: init */
1188 0x17f100f8,
1189 0x14b60108,
1190 0x0011cf06,
1191 0x010911e7,
1192 0xfe0814b6,
1193 0x17f10014,
1194 0x13f000e0,
1195 0x1c07f000,
1196 0xd00604b6,
1197 0x04bd0001,
1198 0xf0ff17f0,
1199 0x04b61407,
1200 0x0001d006,
1201 0x17f004bd,
1202 0x0015f102,
1203 0x1007f008,
1204 0xd00604b6, 1116 0xd00604b6,
1205 0x04bd0001, 1117 0x04bd000e,
1206 0x011a17f1, 1118/* 0x02bb: timer_enable */
1207 0xfe0013f0, 1119 0xf09a0e80,
1208 0x31f40010, 1120 0x07f00187,
1209 0x0117f010, 1121 0x0604b638,
1210 0xb63807f0, 1122 0xbd0008d0,
1123/* 0x02c9: timer_done */
1124 0x1031f404,
1125 0x90fc80fc,
1126/* 0x02d2: send_proc */
1127 0x80f900f8,
1128 0xe89890f9,
1129 0x04e99805,
1130 0xb80486f0,
1131 0x0bf40689,
1132 0x0398c42a,
1133 0xb6048894,
1134 0x8ebb1880,
1135 0x00fa9800,
1136 0x80008a80,
1137 0x8c80018d,
1138 0x038b8002,
1139 0xf00190b6,
1140 0xe9800794,
1141 0x0231f404,
1142/* 0x030c: send_done */
1143 0x80fc90fc,
1144/* 0x0312: find */
1145 0x80f900f8,
1146 0xf45887f0,
1147/* 0x031a: find_loop */
1148 0x8a980131,
1149 0x06aeb800,
1150 0xb6100bf4,
1151 0x86b15880,
1152 0x1bf40268,
1153 0x0132f4f0,
1154/* 0x0330: find_done */
1155 0xfc028eb9,
1156/* 0x0337: send */
1157 0xf500f880,
1158 0xf4031221,
1159 0x00f89701,
1160/* 0x0340: recv */
1161 0x80f990f9,
1162 0x9805e898,
1163 0x32f404e9,
1164 0x0689b801,
1165 0xc43d0bf4,
1166 0x80b60389,
1167 0x0784f001,
1168 0x9805e880,
1169 0xf0f902ea,
1170 0xf9018ffe,
1171 0x02efb9f0,
1172 0xbb049994,
1173 0xe0b600e9,
1174 0x03eb9818,
1175 0x9802ec98,
1176 0xee9801ed,
1177 0xfca5f900,
1178 0x00f8fef0,
1179 0xfc0131f4,
1180/* 0x038d: recv_done */
1181 0xfc80fcf0,
1182/* 0x0393: init */
1183 0xf100f890,
1184 0xb6010817,
1185 0x11cf0614,
1186 0x0911e700,
1187 0x0814b601,
1188 0xf10014fe,
1189 0xf000e017,
1190 0x07f00013,
1191 0x0604b61c,
1192 0xbd0001d0,
1193 0xff17f004,
1194 0xb61407f0,
1211 0x01d00604, 1195 0x01d00604,
1212 0xf004bd00, 1196 0xf004bd00,
1213/* 0x0402: init_proc */ 1197 0x15f10217,
1214 0xf19858f7, 1198 0x07f00800,
1215 0x0016b001, 1199 0x0604b610,
1216 0xf9fa0bf4, 1200 0xbd0001d0,
1217 0x58f0b615, 1201 0x1a17f104,
1218/* 0x0413: mulu32_32_64 */ 1202 0x0013f001,
1219 0xf9f20ef4, 1203 0xf40010fe,
1220 0xf920f910, 1204 0x17f01031,
1221 0x9540f930, 1205 0x3807f001,
1222 0xd29510e1, 1206 0xd00604b6,
1223 0xbdc4bd10, 1207 0x04bd0001,
1224 0xc0edffb4, 1208/* 0x03f7: init_proc */
1225 0xb9301dff, 1209 0x9858f7f0,
1226 0x34f10234, 1210 0x16b001f1,
1227 0x34b6ffff, 1211 0xfa0bf400,
1228 0x1045b610, 1212 0xf0b615f9,
1229 0xbb00c3bb, 1213 0xf20ef458,
1230 0xe2ff01b4, 1214/* 0x0408: mulu32_32_64 */
1231 0x0234b930, 1215 0x20f910f9,
1232 0xffff34f1, 1216 0x40f930f9,
1233 0xb61034b6, 1217 0x9510e195,
1234 0xc3bb1045, 1218 0xc4bd10d2,
1235 0x01b4bb00, 1219 0xedffb4bd,
1236 0xbb3012ff, 1220 0x301dffc0,
1237 0x40fc00b3, 1221 0xf10234b9,
1238 0x20fc30fc, 1222 0xb6ffff34,
1239 0x00f810fc, 1223 0x45b61034,
1240/* 0x0464: host_send */ 1224 0x00c3bb10,
1241 0x04b017f1, 1225 0xff01b4bb,
1242 0xcf0614b6, 1226 0x34b930e2,
1243 0x27f10011, 1227 0xff34f102,
1244 0x24b604a0, 1228 0x1034b6ff,
1245 0x0022cf06, 1229 0xbb1045b6,
1246 0xf40612b8, 1230 0xb4bb00c3,
1247 0x1ec4320b, 1231 0x3012ff01,
1248 0x04ee9407, 1232 0xfc00b3bb,
1249 0x0270e0b7, 1233 0xfc30fc40,
1250 0x9803eb98, 1234 0xf810fc20,
1251 0xed9802ec, 1235/* 0x0459: host_send */
1252 0x00ee9801, 1236 0xb017f100,
1253 0x034221f5,
1254 0xc40110b6,
1255 0x07f10f1e,
1256 0x04b604b0,
1257 0x000ed006,
1258 0x0ef404bd,
1259/* 0x04ad: host_send_done */
1260/* 0x04af: host_recv */
1261 0xf100f8ba,
1262 0xf14e4917,
1263 0xb8525413,
1264 0x0bf406e1,
1265/* 0x04bd: host_recv_wait */
1266 0xcc17f1aa,
1267 0x0614b604, 1237 0x0614b604,
1268 0xf10011cf, 1238 0xf10011cf,
1269 0xb604c827, 1239 0xb604a027,
1270 0x22cf0624, 1240 0x22cf0624,
1271 0x0816f000, 1241 0x0612b800,
1272 0xf40612b8, 1242 0xc4320bf4,
1273 0x23c4e60b, 1243 0xee94071e,
1274 0x0434b607, 1244 0x70e0b704,
1275 0x02f030b7, 1245 0x03eb9802,
1276 0x80033b80, 1246 0x9802ec98,
1277 0x3d80023c, 1247 0xee9801ed,
1278 0x003e8001, 1248 0x3721f500,
1279 0xf00120b6, 1249 0x0110b603,
1280 0x07f10f24, 1250 0xf10f1ec4,
1281 0x04b604c8, 1251 0xb604b007,
1282 0x0002d006, 1252 0x0ed00604,
1283 0x27f004bd, 1253 0xf404bd00,
1284 0x0007f040, 1254/* 0x04a2: host_send_done */
1285 0xd00604b6, 1255 0x00f8ba0e,
1286 0x04bd0002, 1256/* 0x04a4: host_recv */
1287/* 0x0512: host_init */ 1257 0x4e4917f1,
1288 0x17f100f8, 1258 0x525413f1,
1259 0xf406e1b8,
1260/* 0x04b2: host_recv_wait */
1261 0x17f1aa0b,
1262 0x14b604cc,
1263 0x0011cf06,
1264 0x04c827f1,
1265 0xcf0624b6,
1266 0x16f00022,
1267 0x0612b808,
1268 0xc4e60bf4,
1269 0x34b60723,
1270 0xf030b704,
1271 0x033b8002,
1272 0x80023c80,
1273 0x3e80013d,
1274 0x0120b600,
1275 0xf10f24f0,
1276 0xb604c807,
1277 0x02d00604,
1278 0xf004bd00,
1279 0x07f04027,
1280 0x0604b600,
1281 0xbd0002d0,
1282/* 0x0507: host_init */
1283 0xf100f804,
1284 0xb6008017,
1285 0x15f11014,
1286 0x07f10270,
1287 0x04b604d0,
1288 0x0001d006,
1289 0x17f104bd,
1289 0x14b60080, 1290 0x14b60080,
1290 0x7015f110, 1291 0xf015f110,
1291 0xd007f102, 1292 0xdc07f102,
1292 0x0604b604, 1293 0x0604b604,
1293 0xbd0001d0, 1294 0xbd0001d0,
1294 0x8017f104, 1295 0x0117f004,
1295 0x1014b600, 1296 0x04c407f1,
1296 0x02f015f1,
1297 0x04dc07f1,
1298 0xd00604b6, 1297 0xd00604b6,
1299 0x04bd0001, 1298 0x04bd0001,
1300 0xf10117f0, 1299/* 0x0546: memx_func_enter */
1301 0xb604c407, 1300 0x67f100f8,
1302 0x01d00604, 1301 0x77f11620,
1303 0xf804bd00, 1302 0x73f1f55d,
1304/* 0x0551: memx_func_enter */ 1303 0x6eb9ffff,
1305 0x2067f100, 1304 0x0421f402,
1306 0x5d77f116, 1305 0xfd02d8b9,
1307 0xff73f1f5, 1306 0x60f90487,
1307 0xd0fc80f9,
1308 0x21f4e0fc,
1309 0xfe77f13f,
1310 0xff73f1ff,
1308 0x026eb9ff, 1311 0x026eb9ff,
1309 0xb90421f4, 1312 0xb90421f4,
1310 0x87fd02d8, 1313 0x87fd02d8,
1311 0xf960f904, 1314 0xf960f904,
1312 0xfcd0fc80, 1315 0xfcd0fc80,
1313 0x3f21f4e0, 1316 0x3f21f4e0,
1314 0xfffe77f1, 1317 0x26f067f1,
1315 0xffff73f1,
1316 0xf4026eb9, 1318 0xf4026eb9,
1317 0xd8b90421, 1319 0xd8b90421,
1318 0x0487fd02, 1320 0x0487fd02,
1319 0x80f960f9, 1321 0x80f960f9,
1320 0xe0fcd0fc, 1322 0xe0fcd0fc,
1321 0xf13f21f4, 1323 0xf03f21f4,
1322 0xb926f067,
1323 0x21f4026e,
1324 0x02d8b904,
1325 0xf90487fd,
1326 0xfc80f960,
1327 0xf4e0fcd0,
1328 0x67f03f21,
1329 0xe007f104,
1330 0x0604b607,
1331 0xbd0006d0,
1332/* 0x05bd: memx_func_enter_wait */
1333 0xc067f104,
1334 0x0664b607,
1335 0xf00066cf,
1336 0x0bf40464,
1337 0x2c67f0f3,
1338 0xcf0664b6,
1339 0x06800066,
1340/* 0x05db: memx_func_leave */
1341 0xf000f8f1,
1342 0x64b62c67,
1343 0x0066cf06,
1344 0xf0f20680,
1345 0x07f10467, 1324 0x07f10467,
1346 0x04b607e4, 1325 0x04b607e0,
1347 0x0006d006, 1326 0x0006d006,
1348/* 0x05f6: memx_func_leave_wait */ 1327/* 0x05b2: memx_func_enter_wait */
1349 0x67f104bd, 1328 0x67f104bd,
1350 0x64b607c0, 1329 0x64b607c0,
1351 0x0066cf06, 1330 0x0066cf06,
1352 0xf40464f0, 1331 0xf40464f0,
1353 0x67f1f31b, 1332 0x67f0f30b,
1354 0x77f126f0, 1333 0x0664b62c,
1355 0x73f00001, 1334 0x800066cf,
1356 0x026eb900, 1335 0x00f8f106,
1357 0xb90421f4, 1336/* 0x05d0: memx_func_leave */
1358 0x87fd02d8, 1337 0xb62c67f0,
1359 0xf960f905, 1338 0x66cf0664,
1360 0xfcd0fc80, 1339 0xf2068000,
1361 0x3f21f4e0, 1340 0xf10467f0,
1362 0x162067f1, 1341 0xb607e407,
1363 0xf4026eb9, 1342 0x06d00604,
1364 0xd8b90421, 1343/* 0x05eb: memx_func_leave_wait */
1365 0x0587fd02, 1344 0xf104bd00,
1366 0x80f960f9, 1345 0xb607c067,
1367 0xe0fcd0fc, 1346 0x66cf0664,
1368 0xf13f21f4, 1347 0x0464f000,
1369 0xf00aa277, 1348 0xf1f31bf4,
1349 0xf126f067,
1350 0xf0000177,
1370 0x6eb90073, 1351 0x6eb90073,
1371 0x0421f402, 1352 0x0421f402,
1372 0xfd02d8b9, 1353 0xfd02d8b9,
1373 0x60f90587, 1354 0x60f90587,
1374 0xd0fc80f9, 1355 0xd0fc80f9,
1375 0x21f4e0fc, 1356 0x21f4e0fc,
1376/* 0x0663: memx_func_wait_vblank */ 1357 0x2067f13f,
1377 0x9800f83f, 1358 0x026eb916,
1378 0x66b00016, 1359 0xb90421f4,
1379 0x130bf400, 1360 0x87fd02d8,
1380 0xf40166b0, 1361 0xf960f905,
1381 0x0ef4060b, 1362 0xfcd0fc80,
1382/* 0x0675: memx_func_wait_vblank_head1 */ 1363 0x3f21f4e0,
1383 0x2077f12e, 1364 0x0aa277f1,
1384 0x070ef400, 1365 0xb90073f0,
1385/* 0x067c: memx_func_wait_vblank_head0 */ 1366 0x21f4026e,
1386 0x000877f1, 1367 0x02d8b904,
1387/* 0x0680: memx_func_wait_vblank_0 */ 1368 0xf90587fd,
1388 0x07c467f1, 1369 0xfc80f960,
1389 0xcf0664b6, 1370 0xf4e0fcd0,
1390 0x67fd0066, 1371 0x00f83f21,
1391 0xf31bf404, 1372/* 0x0658: memx_func_wait_vblank */
1392/* 0x0690: memx_func_wait_vblank_1 */ 1373 0xb0001698,
1393 0x07c467f1, 1374 0x0bf40066,
1394 0xcf0664b6, 1375 0x0166b013,
1395 0x67fd0066, 1376 0xf4060bf4,
1396 0xf30bf404, 1377/* 0x066a: memx_func_wait_vblank_head1 */
1397/* 0x06a0: memx_func_wait_vblank_fini */ 1378 0x77f12e0e,
1398 0xf80410b6, 1379 0x0ef40020,
1399/* 0x06a5: memx_func_wr32 */ 1380/* 0x0671: memx_func_wait_vblank_head0 */
1400 0x00169800, 1381 0x0877f107,
1401 0xb6011598, 1382/* 0x0675: memx_func_wait_vblank_0 */
1402 0x60f90810, 1383 0xc467f100,
1403 0xd0fc50f9, 1384 0x0664b607,
1404 0x21f4e0fc, 1385 0xfd0066cf,
1405 0x0242b63f, 1386 0x1bf40467,
1406 0xf8e91bf4, 1387/* 0x0685: memx_func_wait_vblank_1 */
1407/* 0x06c1: memx_func_wait */ 1388 0xc467f1f3,
1408 0x2c87f000, 1389 0x0664b607,
1409 0xcf0684b6, 1390 0xfd0066cf,
1410 0x1e980088, 1391 0x0bf40467,
1411 0x011d9800, 1392/* 0x0695: memx_func_wait_vblank_fini */
1412 0x98021c98, 1393 0x0410b6f3,
1413 0x10b6031b, 1394/* 0x069a: memx_func_wr32 */
1414 0xa421f410, 1395 0x169800f8,
1415/* 0x06de: memx_func_delay */ 1396 0x01159800,
1416 0x1e9800f8, 1397 0xf90810b6,
1398 0xfc50f960,
1399 0xf4e0fcd0,
1400 0x42b63f21,
1401 0xe91bf402,
1402/* 0x06b6: memx_func_wait */
1403 0x87f000f8,
1404 0x0684b62c,
1405 0x980088cf,
1406 0x1d98001e,
1407 0x021c9801,
1408 0xb6031b98,
1409 0x21f41010,
1410/* 0x06d3: memx_func_delay */
1411 0x9800f8a4,
1412 0x10b6001e,
1413 0x7f21f404,
1414/* 0x06de: memx_func_train */
1415 0x00f800f8,
1416/* 0x06e0: memx_exec */
1417 0xd0f9e0f9,
1418 0xb902c1b9,
1419/* 0x06ea: memx_exec_next */
1420 0x139802b2,
1417 0x0410b600, 1421 0x0410b600,
1422 0x01f034e7,
1423 0x01e033e7,
1424 0xf00132b6,
1425 0x35980c30,
1426 0xb855f9de,
1427 0x1ef40612,
1428 0xf10b98e4,
1429 0xbbf20c98,
1430 0xb7f102cb,
1431 0xb4b607c4,
1432 0x00bbcf06,
1433 0xe0fcd0fc,
1434 0x033721f5,
1435/* 0x0726: memx_info */
1436 0xc67000f8,
1437 0x0e0bf401,
1438/* 0x072c: memx_info_data */
1439 0x03ccc7f1,
1440 0x0800b7f1,
1441/* 0x0737: memx_info_train */
1442 0xf10b0ef4,
1443 0xf10bccc7,
1444/* 0x073f: memx_info_send */
1445 0xf50100b7,
1446 0xf8033721,
1447/* 0x0745: memx_recv */
1448 0x01d6b000,
1449 0xb0980bf4,
1450 0x0bf400d6,
1451/* 0x0753: memx_init */
1452 0xf800f8d8,
1453/* 0x0755: perf_recv */
1454/* 0x0757: perf_init */
1455 0xf800f800,
1456/* 0x0759: i2c_drive_scl */
1457 0x0036b000,
1458 0xf1110bf4,
1459 0xb607e007,
1460 0x01d00604,
1461 0xf804bd00,
1462/* 0x076d: i2c_drive_scl_lo */
1463 0xe407f100,
1464 0x0604b607,
1465 0xbd0001d0,
1466/* 0x077b: i2c_drive_sda */
1467 0xb000f804,
1468 0x0bf40036,
1469 0xe007f111,
1470 0x0604b607,
1471 0xbd0002d0,
1472/* 0x078f: i2c_drive_sda_lo */
1473 0xf100f804,
1474 0xb607e407,
1475 0x02d00604,
1476 0xf804bd00,
1477/* 0x079d: i2c_sense_scl */
1478 0x0132f400,
1479 0x07c437f1,
1480 0xcf0634b6,
1481 0x31fd0033,
1482 0x060bf404,
1483/* 0x07b3: i2c_sense_scl_done */
1484 0xf80131f4,
1485/* 0x07b5: i2c_sense_sda */
1486 0x0132f400,
1487 0x07c437f1,
1488 0xcf0634b6,
1489 0x32fd0033,
1490 0x060bf404,
1491/* 0x07cb: i2c_sense_sda_done */
1492 0xf80131f4,
1493/* 0x07cd: i2c_raise_scl */
1494 0xf140f900,
1495 0xf0089847,
1496 0x21f50137,
1497/* 0x07da: i2c_raise_scl_wait */
1498 0xe7f10759,
1499 0x21f403e8,
1500 0x9d21f57f,
1501 0x0901f407,
1502 0xf40142b6,
1503/* 0x07ee: i2c_raise_scl_done */
1504 0x40fcef1b,
1505/* 0x07f2: i2c_start */
1506 0x21f500f8,
1507 0x11f4079d,
1508 0xb521f50d,
1509 0x0611f407,
1510/* 0x0803: i2c_start_rep */
1511 0xf0300ef4,
1512 0x21f50037,
1513 0x37f00759,
1514 0x7b21f501,
1515 0x0076bb07,
1516 0xf90465b6,
1517 0x04659450,
1518 0xbd0256bb,
1519 0x0475fd50,
1520 0x21f550fc,
1521 0x64b607cd,
1522 0x1f11f404,
1523/* 0x0830: i2c_start_send */
1524 0xf50037f0,
1525 0xf1077b21,
1526 0xf41388e7,
1527 0x37f07f21,
1528 0x5921f500,
1529 0x88e7f107,
1530 0x7f21f413,
1531/* 0x084c: i2c_start_out */
1532/* 0x084e: i2c_stop */
1533 0x37f000f8,
1534 0x5921f500,
1535 0x0037f007,
1536 0x077b21f5,
1537 0x03e8e7f1,
1538 0xf07f21f4,
1539 0x21f50137,
1540 0xe7f10759,
1541 0x21f41388,
1542 0x0137f07f,
1543 0x077b21f5,
1544 0x1388e7f1,
1418 0xf87f21f4, 1545 0xf87f21f4,
1419/* 0x06e9: memx_func_train */ 1546/* 0x0881: i2c_bitw */
1420/* 0x06eb: memx_exec */ 1547 0x7b21f500,
1421 0xf900f800,
1422 0xb9d0f9e0,
1423 0xb2b902c1,
1424/* 0x06f5: memx_exec_next */
1425 0x00139802,
1426 0xe70410b6,
1427 0xe701f034,
1428 0xb601e033,
1429 0x30f00132,
1430 0xde35980c,
1431 0x12b855f9,
1432 0xe41ef406,
1433 0x98f10b98,
1434 0xcbbbf20c,
1435 0xc4b7f102,
1436 0x06b4b607,
1437 0xfc00bbcf,
1438 0xf5e0fcd0,
1439 0xf8034221,
1440/* 0x0731: memx_info */
1441 0x01c67000,
1442/* 0x0737: memx_info_data */
1443 0xf10e0bf4,
1444 0xf103ccc7,
1445 0xf40800b7,
1446/* 0x0742: memx_info_train */
1447 0xc7f10b0e,
1448 0xb7f10bcc,
1449/* 0x074a: memx_info_send */
1450 0x21f50100,
1451 0x00f80342,
1452/* 0x0750: memx_recv */
1453 0xf401d6b0,
1454 0xd6b0980b,
1455 0xd80bf400,
1456/* 0x075e: memx_init */
1457 0x00f800f8,
1458/* 0x0760: perf_recv */
1459/* 0x0762: perf_init */
1460 0x00f800f8,
1461/* 0x0764: i2c_drive_scl */
1462 0xf40036b0,
1463 0x07f1110b,
1464 0x04b607e0,
1465 0x0001d006,
1466 0x00f804bd,
1467/* 0x0778: i2c_drive_scl_lo */
1468 0x07e407f1,
1469 0xd00604b6,
1470 0x04bd0001,
1471/* 0x0786: i2c_drive_sda */
1472 0x36b000f8,
1473 0x110bf400,
1474 0x07e007f1,
1475 0xd00604b6,
1476 0x04bd0002,
1477/* 0x079a: i2c_drive_sda_lo */
1478 0x07f100f8,
1479 0x04b607e4,
1480 0x0002d006,
1481 0x00f804bd,
1482/* 0x07a8: i2c_sense_scl */
1483 0xf10132f4,
1484 0xb607c437,
1485 0x33cf0634,
1486 0x0431fd00,
1487 0xf4060bf4,
1488/* 0x07be: i2c_sense_scl_done */
1489 0x00f80131,
1490/* 0x07c0: i2c_sense_sda */
1491 0xf10132f4,
1492 0xb607c437,
1493 0x33cf0634,
1494 0x0432fd00,
1495 0xf4060bf4,
1496/* 0x07d6: i2c_sense_sda_done */
1497 0x00f80131,
1498/* 0x07d8: i2c_raise_scl */
1499 0x47f140f9,
1500 0x37f00898,
1501 0x6421f501,
1502/* 0x07e5: i2c_raise_scl_wait */
1503 0xe8e7f107, 1548 0xe8e7f107,
1504 0x7f21f403, 1549 0x7f21f403,
1505 0x07a821f5,
1506 0xb60901f4,
1507 0x1bf40142,
1508/* 0x07f9: i2c_raise_scl_done */
1509 0xf840fcef,
1510/* 0x07fd: i2c_start */
1511 0xa821f500,
1512 0x0d11f407,
1513 0x07c021f5,
1514 0xf40611f4,
1515/* 0x080e: i2c_start_rep */
1516 0x37f0300e,
1517 0x6421f500,
1518 0x0137f007,
1519 0x078621f5,
1520 0xb60076bb, 1550 0xb60076bb,
1521 0x50f90465, 1551 0x50f90465,
1522 0xbb046594, 1552 0xbb046594,
1523 0x50bd0256, 1553 0x50bd0256,
1524 0xfc0475fd, 1554 0xfc0475fd,
1525 0xd821f550, 1555 0xcd21f550,
1526 0x0464b607, 1556 0x0464b607,
1527/* 0x083b: i2c_start_send */ 1557 0xf11811f4,
1528 0xf01f11f4, 1558 0xf41388e7,
1529 0x21f50037,
1530 0xe7f10786,
1531 0x21f41388,
1532 0x0037f07f,
1533 0x076421f5,
1534 0x1388e7f1,
1535/* 0x0857: i2c_start_out */
1536 0xf87f21f4,
1537/* 0x0859: i2c_stop */
1538 0x0037f000,
1539 0x076421f5,
1540 0xf50037f0,
1541 0xf1078621,
1542 0xf403e8e7,
1543 0x37f07f21, 1559 0x37f07f21,
1544 0x6421f501, 1560 0x5921f500,
1545 0x88e7f107, 1561 0x88e7f107,
1546 0x7f21f413, 1562 0x7f21f413,
1547 0xf50137f0, 1563/* 0x08c0: i2c_bitw_out */
1548 0xf1078621, 1564/* 0x08c2: i2c_bitr */
1549 0xf41388e7, 1565 0x37f000f8,
1550 0x00f87f21, 1566 0x7b21f501,
1551/* 0x088c: i2c_bitw */ 1567 0xe8e7f107,
1552 0x078621f5, 1568 0x7f21f403,
1553 0x03e8e7f1, 1569 0xb60076bb,
1554 0xbb7f21f4, 1570 0x50f90465,
1555 0x65b60076, 1571 0xbb046594,
1556 0x9450f904, 1572 0x50bd0256,
1557 0x56bb0465, 1573 0xfc0475fd,
1558 0xfd50bd02, 1574 0xcd21f550,
1559 0x50fc0475, 1575 0x0464b607,
1560 0x07d821f5, 1576 0xf51b11f4,
1561 0xf40464b6, 1577 0xf007b521,
1562 0xe7f11811, 1578 0x21f50037,
1579 0xe7f10759,
1563 0x21f41388, 1580 0x21f41388,
1564 0x0037f07f, 1581 0x013cf07f,
1565 0x076421f5, 1582/* 0x0907: i2c_bitr_done */
1566 0x1388e7f1, 1583 0xf80131f4,
1567/* 0x08cb: i2c_bitw_out */ 1584/* 0x0909: i2c_get_byte */
1568 0xf87f21f4, 1585 0x0057f000,
1569/* 0x08cd: i2c_bitr */ 1586/* 0x090f: i2c_get_byte_next */
1570 0x0137f000, 1587 0xb60847f0,
1571 0x078621f5, 1588 0x76bb0154,
1572 0x03e8e7f1, 1589 0x0465b600,
1573 0xbb7f21f4, 1590 0x659450f9,
1574 0x65b60076, 1591 0x0256bb04,
1575 0x9450f904, 1592 0x75fd50bd,
1576 0x56bb0465, 1593 0xf550fc04,
1577 0xfd50bd02, 1594 0xb608c221,
1578 0x50fc0475, 1595 0x11f40464,
1579 0x07d821f5, 1596 0x0553fd2b,
1580 0xf40464b6, 1597 0xf40142b6,
1581 0x21f51b11, 1598 0x37f0d81b,
1582 0x37f007c0,
1583 0x6421f500,
1584 0x88e7f107,
1585 0x7f21f413,
1586 0xf4013cf0,
1587/* 0x0912: i2c_bitr_done */
1588 0x00f80131,
1589/* 0x0914: i2c_get_byte */
1590 0xf00057f0,
1591/* 0x091a: i2c_get_byte_next */
1592 0x54b60847,
1593 0x0076bb01, 1599 0x0076bb01,
1594 0xf90465b6, 1600 0xf90465b6,
1595 0x04659450, 1601 0x04659450,
1596 0xbd0256bb, 1602 0xbd0256bb,
1597 0x0475fd50, 1603 0x0475fd50,
1598 0x21f550fc, 1604 0x21f550fc,
1599 0x64b608cd, 1605 0x64b60881,
1600 0x2b11f404, 1606/* 0x0959: i2c_get_byte_done */
1601 0xb60553fd, 1607/* 0x095b: i2c_put_byte */
1602 0x1bf40142, 1608 0xf000f804,
1603 0x0137f0d8, 1609/* 0x095e: i2c_put_byte_next */
1610 0x42b60847,
1611 0x3854ff01,
1604 0xb60076bb, 1612 0xb60076bb,
1605 0x50f90465, 1613 0x50f90465,
1606 0xbb046594, 1614 0xbb046594,
1607 0x50bd0256, 1615 0x50bd0256,
1608 0xfc0475fd, 1616 0xfc0475fd,
1609 0x8c21f550, 1617 0x8121f550,
1610 0x0464b608, 1618 0x0464b608,
1611/* 0x0964: i2c_get_byte_done */ 1619 0xb03411f4,
1612/* 0x0966: i2c_put_byte */ 1620 0x1bf40046,
1613 0x47f000f8, 1621 0x0076bbd8,
1614/* 0x0969: i2c_put_byte_next */
1615 0x0142b608,
1616 0xbb3854ff,
1617 0x65b60076,
1618 0x9450f904,
1619 0x56bb0465,
1620 0xfd50bd02,
1621 0x50fc0475,
1622 0x088c21f5,
1623 0xf40464b6,
1624 0x46b03411,
1625 0xd81bf400,
1626 0xb60076bb,
1627 0x50f90465,
1628 0xbb046594,
1629 0x50bd0256,
1630 0xfc0475fd,
1631 0xcd21f550,
1632 0x0464b608,
1633 0xbb0f11f4,
1634 0x36b00076,
1635 0x061bf401,
1636/* 0x09bf: i2c_put_byte_done */
1637 0xf80132f4,
1638/* 0x09c1: i2c_addr */
1639 0x0076bb00,
1640 0xf90465b6, 1622 0xf90465b6,
1641 0x04659450, 1623 0x04659450,
1642 0xbd0256bb, 1624 0xbd0256bb,
1643 0x0475fd50, 1625 0x0475fd50,
1644 0x21f550fc, 1626 0x21f550fc,
1645 0x64b607fd, 1627 0x64b608c2,
1646 0x2911f404, 1628 0x0f11f404,
1647 0x012ec3e7, 1629 0xb00076bb,
1648 0xfd0134b6, 1630 0x1bf40136,
1649 0x76bb0553, 1631 0x0132f406,
1632/* 0x09b4: i2c_put_byte_done */
1633/* 0x09b6: i2c_addr */
1634 0x76bb00f8,
1650 0x0465b600, 1635 0x0465b600,
1651 0x659450f9, 1636 0x659450f9,
1652 0x0256bb04, 1637 0x0256bb04,
1653 0x75fd50bd, 1638 0x75fd50bd,
1654 0xf550fc04, 1639 0xf550fc04,
1655 0xb6096621, 1640 0xb607f221,
1656/* 0x0a06: i2c_addr_done */ 1641 0x11f40464,
1657 0x00f80464, 1642 0x2ec3e729,
1658/* 0x0a08: i2c_acquire_addr */ 1643 0x0134b601,
1659 0xb6f8cec7, 1644 0xbb0553fd,
1660 0xe0b702e4,
1661 0xee980d1c,
1662/* 0x0a17: i2c_acquire */
1663 0xf500f800,
1664 0xf40a0821,
1665 0xd9f00421,
1666 0x3f21f403,
1667/* 0x0a26: i2c_release */
1668 0x21f500f8,
1669 0x21f40a08,
1670 0x03daf004,
1671 0xf83f21f4,
1672/* 0x0a35: i2c_recv */
1673 0x0132f400,
1674 0xb6f8c1c7,
1675 0x16b00214,
1676 0x3a1ff528,
1677 0xf413a001,
1678 0x0032980c,
1679 0x0ccc13a0,
1680 0xf4003198,
1681 0xd0f90231,
1682 0xd0f9e0f9,
1683 0x000067f1,
1684 0x100063f1,
1685 0xbb016792,
1686 0x65b60076, 1645 0x65b60076,
1687 0x9450f904, 1646 0x9450f904,
1688 0x56bb0465, 1647 0x56bb0465,
1689 0xfd50bd02, 1648 0xfd50bd02,
1690 0x50fc0475, 1649 0x50fc0475,
1691 0x0a1721f5, 1650 0x095b21f5,
1692 0xfc0464b6, 1651/* 0x09fb: i2c_addr_done */
1693 0x00d6b0d0, 1652 0xf80464b6,
1694 0x00b31bf5, 1653/* 0x09fd: i2c_acquire_addr */
1695 0xbb0057f0, 1654 0xf8cec700,
1655 0xb702e4b6,
1656 0x980d1ce0,
1657 0x00f800ee,
1658/* 0x0a0c: i2c_acquire */
1659 0x09fd21f5,
1660 0xf00421f4,
1661 0x21f403d9,
1662/* 0x0a1b: i2c_release */
1663 0xf500f83f,
1664 0xf409fd21,
1665 0xdaf00421,
1666 0x3f21f403,
1667/* 0x0a2a: i2c_recv */
1668 0x32f400f8,
1669 0xf8c1c701,
1670 0xb00214b6,
1671 0x1ff52816,
1672 0x13a0013a,
1673 0x32980cf4,
1674 0xcc13a000,
1675 0x0031980c,
1676 0xf90231f4,
1677 0xf9e0f9d0,
1678 0x0067f1d0,
1679 0x0063f100,
1680 0x01679210,
1681 0xb60076bb,
1682 0x50f90465,
1683 0xbb046594,
1684 0x50bd0256,
1685 0xfc0475fd,
1686 0x0c21f550,
1687 0x0464b60a,
1688 0xd6b0d0fc,
1689 0xb31bf500,
1690 0x0057f000,
1691 0xb60076bb,
1692 0x50f90465,
1693 0xbb046594,
1694 0x50bd0256,
1695 0xfc0475fd,
1696 0xb621f550,
1697 0x0464b609,
1698 0x00d011f5,
1699 0xbbe0c5c7,
1696 0x65b60076, 1700 0x65b60076,
1697 0x9450f904, 1701 0x9450f904,
1698 0x56bb0465, 1702 0x56bb0465,
1699 0xfd50bd02, 1703 0xfd50bd02,
1700 0x50fc0475, 1704 0x50fc0475,
1701 0x09c121f5, 1705 0x095b21f5,
1702 0xf50464b6, 1706 0xf50464b6,
1703 0xc700d011, 1707 0xf000ad11,
1704 0x76bbe0c5, 1708 0x76bb0157,
1705 0x0465b600, 1709 0x0465b600,
1706 0x659450f9, 1710 0x659450f9,
1707 0x0256bb04, 1711 0x0256bb04,
1708 0x75fd50bd, 1712 0x75fd50bd,
1709 0xf550fc04, 1713 0xf550fc04,
1710 0xb6096621, 1714 0xb609b621,
1711 0x11f50464, 1715 0x11f50464,
1712 0x57f000ad, 1716 0x76bb008a,
1713 0x0076bb01, 1717 0x0465b600,
1714 0xf90465b6, 1718 0x659450f9,
1715 0x04659450, 1719 0x0256bb04,
1716 0xbd0256bb, 1720 0x75fd50bd,
1717 0x0475fd50, 1721 0xf550fc04,
1718 0x21f550fc, 1722 0xb6090921,
1719 0x64b609c1, 1723 0x11f40464,
1720 0x8a11f504, 1724 0xe05bcb6a,
1721 0x0076bb00, 1725 0xb60076bb,
1722 0xf90465b6, 1726 0x50f90465,
1723 0x04659450, 1727 0xbb046594,
1724 0xbd0256bb, 1728 0x50bd0256,
1725 0x0475fd50, 1729 0xfc0475fd,
1726 0x21f550fc, 1730 0x4e21f550,
1727 0x64b60914, 1731 0x0464b608,
1728 0x6a11f404, 1732 0xbd025bb9,
1729 0xbbe05bcb, 1733 0x430ef474,
1730 0x65b60076, 1734/* 0x0b30: i2c_recv_not_rd08 */
1731 0x9450f904, 1735 0xf401d6b0,
1732 0x56bb0465, 1736 0x57f03d1b,
1733 0xfd50bd02, 1737 0xb621f500,
1734 0x50fc0475, 1738 0x3311f409,
1735 0x085921f5, 1739 0xf5e0c5c7,
1736 0xb90464b6, 1740 0xf4095b21,
1737 0x74bd025b, 1741 0x57f02911,
1738/* 0x0b3b: i2c_recv_not_rd08 */ 1742 0xb621f500,
1739 0xb0430ef4, 1743 0x1f11f409,
1740 0x1bf401d6, 1744 0xf5e0b5c7,
1741 0x0057f03d, 1745 0xf4095b21,
1742 0x09c121f5, 1746 0x21f51511,
1743 0xc73311f4, 1747 0x74bd084e,
1744 0x21f5e0c5, 1748 0xf408c5c7,
1745 0x11f40966, 1749 0x32f4091b,
1746 0x0057f029, 1750 0x030ef402,
1747 0x09c121f5, 1751/* 0x0b70: i2c_recv_not_wr08 */
1748 0xc71f11f4, 1752/* 0x0b70: i2c_recv_done */
1749 0x21f5e0b5, 1753 0xf5f8cec7,
1750 0x11f40966, 1754 0xfc0a1b21,
1751 0x5921f515, 1755 0xf4d0fce0,
1752 0xc774bd08, 1756 0x7cb90a12,
1753 0x1bf408c5, 1757 0x3721f502,
1754 0x0232f409, 1758/* 0x0b85: i2c_recv_exit */
1755/* 0x0b7b: i2c_recv_not_wr08 */ 1759/* 0x0b87: i2c_init */
1756/* 0x0b7b: i2c_recv_done */ 1760 0xf800f803,
1757 0xc7030ef4, 1761/* 0x0b89: test_recv */
1758 0x21f5f8ce, 1762 0xd817f100,
1759 0xe0fc0a26, 1763 0x0614b605,
1760 0x12f4d0fc, 1764 0xb60011cf,
1761 0x027cb90a, 1765 0x07f10110,
1762 0x034221f5, 1766 0x04b605d8,
1763/* 0x0b90: i2c_recv_exit */ 1767 0x0001d006,
1764/* 0x0b92: i2c_init */ 1768 0xe7f104bd,
1769 0xe3f1d900,
1770 0x21f5134f,
1771 0x00f80257,
1772/* 0x0bb0: test_init */
1773 0x0800e7f1,
1774 0x025721f5,
1775/* 0x0bba: idle_recv */
1765 0x00f800f8, 1776 0x00f800f8,
1766/* 0x0b94: test_recv */ 1777/* 0x0bbc: idle */
1767 0x05d817f1, 1778 0xf10031f4,
1768 0xcf0614b6, 1779 0xb605d417,
1769 0x10b60011, 1780 0x11cf0614,
1770 0xd807f101, 1781 0x0110b600,
1771 0x0604b605, 1782 0x05d407f1,
1772 0xbd0001d0, 1783 0xd00604b6,
1773 0x00e7f104, 1784 0x04bd0001,
1774 0x4fe3f1d9, 1785/* 0x0bd8: idle_loop */
1775 0x6221f513, 1786 0xf45817f0,
1776/* 0x0bbb: test_init */ 1787/* 0x0bde: idle_proc */
1777 0xf100f802, 1788/* 0x0bde: idle_proc_exec */
1778 0xf50800e7, 1789 0x10f90232,
1779 0xf8026221, 1790 0xf5021eb9,
1780/* 0x0bc5: idle_recv */ 1791 0xfc034021,
1781/* 0x0bc7: idle */ 1792 0x0911f410,
1782 0xf400f800, 1793 0xf40231f4,
1783 0x17f10031, 1794/* 0x0bf2: idle_proc_next */
1784 0x14b605d4, 1795 0x10b6ef0e,
1785 0x0011cf06, 1796 0x061fb858,
1786 0xf10110b6, 1797 0xf4e61bf4,
1787 0xb605d407, 1798 0x28f4dd02,
1788 0x01d00604, 1799 0xbb0ef400,
1789/* 0x0be3: idle_loop */ 1800 0x00000000,
1790 0xf004bd00, 1801 0x00000000,
1791 0x32f45817, 1802 0x00000000,
1792/* 0x0be9: idle_proc */
1793/* 0x0be9: idle_proc_exec */
1794 0xb910f902,
1795 0x21f5021e,
1796 0x10fc034b,
1797 0xf40911f4,
1798 0x0ef40231,
1799/* 0x0bfd: idle_proc_next */
1800 0x5810b6ef,
1801 0xf4061fb8,
1802 0x02f4e61b,
1803 0x0028f4dd,
1804 0x00bb0ef4,
1805 0x00000000, 1803 0x00000000,
1806 0x00000000, 1804 0x00000000,
1807 0x00000000, 1805 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index 31552af9b06e..7bf6b39ed205 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -24,8 +24,8 @@ uint32_t gf119_pmu_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x0000049d, 27 0x00000492,
28 0x00000446, 28 0x0000043b,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t gf119_pmu_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000068b, 49 0x00000680,
50 0x0000067d, 50 0x00000672,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t gf119_pmu_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x0000068f, 71 0x00000684,
72 0x0000068d, 72 0x00000682,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t gf119_pmu_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000aaa, 93 0x00000a9f,
94 0x0000094d, 94 0x00000942,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t gf119_pmu_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000acd, 115 0x00000ac2,
116 0x00000aac, 116 0x00000aa1,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t gf119_pmu_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000ad9, 137 0x00000ace,
138 0x00000ad7, 138 0x00000acc,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -229,26 +229,26 @@ uint32_t gf119_pmu_data[] = {
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00000001, 230 0x00000001,
231 0x00000000, 231 0x00000000,
232 0x000004d3, 232 0x000004c8,
233/* 0x037c: memx_func_next */ 233/* 0x037c: memx_func_next */
234 0x00000002, 234 0x00000002,
235 0x00000000, 235 0x00000000,
236 0x00000554, 236 0x00000549,
237 0x00000003, 237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x000005d8, 239 0x000005cd,
240 0x00040004, 240 0x00040004,
241 0x00000000, 241 0x00000000,
242 0x000005f4, 242 0x000005e9,
243 0x00010005, 243 0x00010005,
244 0x00000000, 244 0x00000000,
245 0x0000060e, 245 0x00000603,
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x000005d3, 248 0x000005c8,
249 0x00000007, 249 0x00000007,
250 0x00000000, 250 0x00000000,
251 0x00000619, 251 0x0000060e,
252/* 0x03c4: memx_func_tail */ 252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */ 253/* 0x03c4: memx_ts_start */
254 0x00000000, 254 0x00000000,
@@ -916,7 +916,7 @@ uint32_t gf119_pmu_data[] = {
916}; 916};
917 917
918uint32_t gf119_pmu_code[] = { 918uint32_t gf119_pmu_code[] = {
919 0x034d0ef5, 919 0x03420ef5,
920/* 0x0004: rd32 */ 920/* 0x0004: rd32 */
921 0x07a007f1, 921 0x07a007f1,
922 0xbd000ed0, 922 0xbd000ed0,
@@ -977,7 +977,7 @@ uint32_t gf119_pmu_code[] = {
977 0xbb9a0a98, 977 0xbb9a0a98,
978 0x1cf4029a, 978 0x1cf4029a,
979 0x01d7f00f, 979 0x01d7f00f,
980 0x028c21f5, 980 0x028121f5,
981 0x0ef494bd, 981 0x0ef494bd,
982/* 0x00d5: intr_watchdog_next_time */ 982/* 0x00d5: intr_watchdog_next_time */
983 0x9b0a9815, 983 0x9b0a9815,
@@ -1025,716 +1025,714 @@ uint32_t gf119_pmu_code[] = {
1025 0xf14f48e7, 1025 0xf14f48e7,
1026 0xf05453e3, 1026 0xf05453e3,
1027 0x21f500d7, 1027 0x21f500d7,
1028 0xc0fc02f1, 1028 0xc0fc02e6,
1029 0x04c007f1, 1029 0x04c007f1,
1030 0xbd000cd0, 1030 0xbd000cd0,
1031/* 0x0185: intr_subintr_skip_fifo */ 1031/* 0x0185: intr_subintr_skip_fifo */
1032 0x8807f104, 1032 0x8807f104,
1033 0x0009d006, 1033 0x0009d006,
1034/* 0x018e: intr_skip_subintr */ 1034/* 0x018e: intr_skip_subintr */
1035 0x89c404bd, 1035 0x97f104bd,
1036 0x070bf420, 1036 0x90bd00e0,
1037 0xffbfa4f1, 1037 0xf00489fd,
1038/* 0x0198: intr_skip_pause */ 1038 0x08d00407,
1039 0xf44089c4, 1039 0xfc04bd00,
1040 0xa4f1070b, 1040 0x0088fe80,
1041/* 0x01a2: intr_skip_user0 */ 1041 0xe0fcf0fc,
1042 0x07f0ffbf, 1042 0xc0fcd0fc,
1043 0x0008d004, 1043 0xa0fcb0fc,
1044 0x80fc04bd, 1044 0x80fc90fc,
1045 0xfc0088fe, 1045 0x32f400fc,
1046 0xfce0fcf0, 1046/* 0x01bb: ticks_from_ns */
1047 0xfcc0fcd0, 1047 0xf901f800,
1048 0xfca0fcb0,
1049 0xfc80fc90,
1050 0x0032f400,
1051/* 0x01c6: ticks_from_ns */
1052 0xc0f901f8,
1053 0xd7f1b0f9,
1054 0xd3f00144,
1055 0xb321f500,
1056 0xe8ccec03,
1057 0x00b4b003,
1058 0xec120bf4,
1059 0xf103e8ee,
1060 0xf00144d7,
1061 0x21f500d3,
1062/* 0x01ee: ticks_from_ns_quit */
1063 0xceb903b3,
1064 0xfcb0fc02,
1065/* 0x01f7: ticks_from_us */
1066 0xf900f8c0,
1067 0xf1b0f9c0, 1048 0xf1b0f9c0,
1068 0xf00144d7, 1049 0xf00144d7,
1069 0x21f500d3, 1050 0x21f500d3,
1070 0xceb903b3, 1051 0xccec03a8,
1071 0x00b4b002, 1052 0xb4b003e8,
1072 0xbd050bf4, 1053 0x120bf400,
1073/* 0x0211: ticks_from_us_quit */ 1054 0x03e8eeec,
1074 0xfcb0fce4, 1055 0x0144d7f1,
1075/* 0x0217: ticks_to_us */ 1056 0xf500d3f0,
1076 0xf100f8c0, 1057/* 0x01e3: ticks_from_ns_quit */
1077 0xf00144d7, 1058 0xb903a821,
1078 0xedff00d3, 1059 0xb0fc02ce,
1079/* 0x0223: timer */ 1060 0x00f8c0fc,
1080 0xf900f8ec, 1061/* 0x01ec: ticks_from_us */
1081 0xf480f990, 1062 0xb0f9c0f9,
1082 0xf8981032, 1063 0x0144d7f1,
1083 0x0086b003, 1064 0xf500d3f0,
1084 0xbd531cf4, 1065 0xb903a821,
1085 0x3807f084, 1066 0xb4b002ce,
1086 0xbd0008d0, 1067 0x050bf400,
1087 0x3487f004, 1068/* 0x0206: ticks_from_us_quit */
1088 0x980088cf, 1069 0xb0fce4bd,
1089 0x98bb9a09, 1070 0x00f8c0fc,
1090 0x00e9bb02, 1071/* 0x020c: ticks_to_us */
1091 0xf003fe80, 1072 0x0144d7f1,
1092 0x88cf0887, 1073 0xff00d3f0,
1093 0x0284f000, 1074 0x00f8eced,
1094 0xf0201bf4, 1075/* 0x0218: timer */
1095 0x88cf3487, 1076 0x80f990f9,
1096 0x06e0b800, 1077 0x981032f4,
1097 0xb8090bf4, 1078 0x86b003f8,
1098 0x1cf406e8, 1079 0x531cf400,
1099/* 0x026d: timer_reset */ 1080 0x07f084bd,
1100 0x3407f00e, 1081 0x0008d038,
1101 0xbd000ed0, 1082 0x87f004bd,
1102 0x9a0e8004, 1083 0x0088cf34,
1103/* 0x0278: timer_enable */ 1084 0xbb9a0998,
1104 0xf00187f0, 1085 0xe9bb0298,
1105 0x08d03807, 1086 0x03fe8000,
1106/* 0x0283: timer_done */ 1087 0xcf0887f0,
1107 0xf404bd00, 1088 0x84f00088,
1108 0x80fc1031, 1089 0x201bf402,
1109 0x00f890fc, 1090 0xcf3487f0,
1110/* 0x028c: send_proc */ 1091 0xe0b80088,
1111 0x90f980f9, 1092 0x090bf406,
1112 0x9805e898, 1093 0xf406e8b8,
1113 0x86f004e9, 1094/* 0x0262: timer_reset */
1114 0x0689b804, 1095 0x07f00e1c,
1115 0xc42a0bf4, 1096 0x000ed034,
1116 0x88940398, 1097 0x0e8004bd,
1117 0x1880b604, 1098/* 0x026d: timer_enable */
1118 0x98008ebb, 1099 0x0187f09a,
1119 0x8a8000fa, 1100 0xd03807f0,
1120 0x018d8000, 1101 0x04bd0008,
1121 0x80028c80, 1102/* 0x0278: timer_done */
1122 0x90b6038b, 1103 0xfc1031f4,
1123 0x0794f001,
1124 0xf404e980,
1125/* 0x02c6: send_done */
1126 0x90fc0231,
1127 0x00f880fc,
1128/* 0x02cc: find */
1129 0x87f080f9,
1130 0x0131f458,
1131/* 0x02d4: find_loop */
1132 0xb8008a98,
1133 0x0bf406ae,
1134 0x5880b610,
1135 0x026886b1,
1136 0xf4f01bf4,
1137/* 0x02ea: find_done */
1138 0x8eb90132,
1139 0xf880fc02,
1140/* 0x02f1: send */
1141 0xcc21f500,
1142 0x9701f402,
1143/* 0x02fa: recv */
1144 0x90f900f8,
1145 0xe89880f9,
1146 0x04e99805,
1147 0xb80132f4,
1148 0x0bf40689,
1149 0x0389c43d,
1150 0xf00180b6,
1151 0xe8800784,
1152 0x02ea9805,
1153 0x8ffef0f9,
1154 0xb9f0f901,
1155 0x999402ef,
1156 0x00e9bb04,
1157 0x9818e0b6,
1158 0xec9803eb,
1159 0x01ed9802,
1160 0xf900ee98,
1161 0xfef0fca5,
1162 0x31f400f8,
1163/* 0x0347: recv_done */
1164 0xfcf0fc01,
1165 0xf890fc80, 1104 0xf890fc80,
1166/* 0x034d: init */ 1105/* 0x0281: send_proc */
1167 0x0817f100, 1106 0xf980f900,
1168 0x0011cf01, 1107 0x05e89890,
1169 0x010911e7, 1108 0xf004e998,
1170 0xfe0814b6, 1109 0x89b80486,
1171 0x17f10014, 1110 0x2a0bf406,
1172 0x13f000e0, 1111 0x940398c4,
1173 0x1c07f000, 1112 0x80b60488,
1113 0x008ebb18,
1114 0x8000fa98,
1115 0x8d80008a,
1116 0x028c8001,
1117 0xb6038b80,
1118 0x94f00190,
1119 0x04e98007,
1120/* 0x02bb: send_done */
1121 0xfc0231f4,
1122 0xf880fc90,
1123/* 0x02c1: find */
1124 0xf080f900,
1125 0x31f45887,
1126/* 0x02c9: find_loop */
1127 0x008a9801,
1128 0xf406aeb8,
1129 0x80b6100b,
1130 0x6886b158,
1131 0xf01bf402,
1132/* 0x02df: find_done */
1133 0xb90132f4,
1134 0x80fc028e,
1135/* 0x02e6: send */
1136 0x21f500f8,
1137 0x01f402c1,
1138/* 0x02ef: recv */
1139 0xf900f897,
1140 0x9880f990,
1141 0xe99805e8,
1142 0x0132f404,
1143 0xf40689b8,
1144 0x89c43d0b,
1145 0x0180b603,
1146 0x800784f0,
1147 0xea9805e8,
1148 0xfef0f902,
1149 0xf0f9018f,
1150 0x9402efb9,
1151 0xe9bb0499,
1152 0x18e0b600,
1153 0x9803eb98,
1154 0xed9802ec,
1155 0x00ee9801,
1156 0xf0fca5f9,
1157 0xf400f8fe,
1158 0xf0fc0131,
1159/* 0x033c: recv_done */
1160 0x90fc80fc,
1161/* 0x0342: init */
1162 0x17f100f8,
1163 0x11cf0108,
1164 0x0911e700,
1165 0x0814b601,
1166 0xf10014fe,
1167 0xf000e017,
1168 0x07f00013,
1169 0x0001d01c,
1170 0x17f004bd,
1171 0x1407f0ff,
1174 0xbd0001d0, 1172 0xbd0001d0,
1175 0xff17f004, 1173 0x0217f004,
1176 0xd01407f0, 1174 0x080015f1,
1175 0xd01007f0,
1177 0x04bd0001, 1176 0x04bd0001,
1178 0xf10217f0, 1177 0x00f617f1,
1179 0xf0080015, 1178 0xfe0013f0,
1180 0x01d01007, 1179 0x31f40010,
1181 0xf104bd00, 1180 0x0117f010,
1182 0xf000f617, 1181 0xd03807f0,
1183 0x10fe0013, 1182 0x04bd0001,
1184 0x1031f400, 1183/* 0x0397: init_proc */
1185 0xf00117f0, 1184 0x9858f7f0,
1186 0x01d03807, 1185 0x16b001f1,
1187 0xf004bd00, 1186 0xfa0bf400,
1188/* 0x03a2: init_proc */ 1187 0xf0b615f9,
1189 0xf19858f7, 1188 0xf20ef458,
1190 0x0016b001, 1189/* 0x03a8: mulu32_32_64 */
1191 0xf9fa0bf4, 1190 0x20f910f9,
1192 0x58f0b615, 1191 0x40f930f9,
1193/* 0x03b3: mulu32_32_64 */ 1192 0x9510e195,
1194 0xf9f20ef4, 1193 0xc4bd10d2,
1195 0xf920f910, 1194 0xedffb4bd,
1196 0x9540f930, 1195 0x301dffc0,
1197 0xd29510e1, 1196 0xf10234b9,
1198 0xbdc4bd10, 1197 0xb6ffff34,
1199 0xc0edffb4, 1198 0x45b61034,
1200 0xb9301dff, 1199 0x00c3bb10,
1201 0x34f10234, 1200 0xff01b4bb,
1202 0x34b6ffff, 1201 0x34b930e2,
1203 0x1045b610, 1202 0xff34f102,
1204 0xbb00c3bb, 1203 0x1034b6ff,
1205 0xe2ff01b4, 1204 0xbb1045b6,
1206 0x0234b930, 1205 0xb4bb00c3,
1207 0xffff34f1, 1206 0x3012ff01,
1208 0xb61034b6, 1207 0xfc00b3bb,
1209 0xc3bb1045, 1208 0xfc30fc40,
1210 0x01b4bb00, 1209 0xf810fc20,
1211 0xbb3012ff, 1210/* 0x03f9: host_send */
1212 0x40fc00b3, 1211 0xb017f100,
1213 0x20fc30fc, 1212 0x0011cf04,
1214 0x00f810fc, 1213 0x04a027f1,
1215/* 0x0404: host_send */ 1214 0xb80022cf,
1216 0x04b017f1, 1215 0x0bf40612,
1217 0xf10011cf, 1216 0x071ec42f,
1218 0xcf04a027, 1217 0xb704ee94,
1219 0x12b80022, 1218 0x980270e0,
1220 0x2f0bf406, 1219 0xec9803eb,
1221 0x94071ec4, 1220 0x01ed9802,
1222 0xe0b704ee, 1221 0xf500ee98,
1223 0xeb980270, 1222 0xb602e621,
1224 0x02ec9803, 1223 0x1ec40110,
1225 0x9801ed98, 1224 0xb007f10f,
1226 0x21f500ee, 1225 0x000ed004,
1227 0x10b602f1, 1226 0x0ef404bd,
1228 0x0f1ec401, 1227/* 0x0439: host_send_done */
1229 0x04b007f1, 1228/* 0x043b: host_recv */
1230 0xbd000ed0, 1229 0xf100f8c3,
1231 0xc30ef404, 1230 0xf14e4917,
1232/* 0x0444: host_send_done */ 1231 0xb8525413,
1233/* 0x0446: host_recv */ 1232 0x0bf406e1,
1234 0x17f100f8, 1233/* 0x0449: host_recv_wait */
1235 0x13f14e49, 1234 0xcc17f1b3,
1236 0xe1b85254, 1235 0x0011cf04,
1237 0xb30bf406, 1236 0x04c827f1,
1238/* 0x0454: host_recv_wait */ 1237 0xf00022cf,
1239 0x04cc17f1, 1238 0x12b80816,
1240 0xf10011cf, 1239 0xec0bf406,
1241 0xcf04c827, 1240 0xb60723c4,
1242 0x16f00022, 1241 0x30b70434,
1243 0x0612b808, 1242 0x3b8002f0,
1244 0xc4ec0bf4, 1243 0x023c8003,
1245 0x34b60723, 1244 0x80013d80,
1246 0xf030b704, 1245 0x20b6003e,
1247 0x033b8002, 1246 0x0f24f001,
1248 0x80023c80, 1247 0x04c807f1,
1249 0x3e80013d, 1248 0xbd0002d0,
1250 0x0120b600, 1249 0x4027f004,
1251 0xf10f24f0, 1250 0xd00007f0,
1252 0xd004c807,
1253 0x04bd0002, 1251 0x04bd0002,
1254 0xf04027f0, 1252/* 0x0492: host_init */
1255 0x02d00007, 1253 0x17f100f8,
1256 0xf804bd00, 1254 0x14b60080,
1257/* 0x049d: host_init */ 1255 0x7015f110,
1258 0x8017f100, 1256 0xd007f102,
1259 0x1014b600, 1257 0x0001d004,
1260 0x027015f1, 1258 0x17f104bd,
1261 0x04d007f1, 1259 0x14b60080,
1262 0xbd0001d0, 1260 0xf015f110,
1263 0x8017f104, 1261 0xdc07f102,
1264 0x1014b600, 1262 0x0001d004,
1265 0x02f015f1, 1263 0x17f004bd,
1266 0x04dc07f1, 1264 0xc407f101,
1267 0xbd0001d0, 1265 0x0001d004,
1268 0x0117f004, 1266 0x00f804bd,
1269 0x04c407f1, 1267/* 0x04c8: memx_func_enter */
1270 0xbd0001d0, 1268 0x162067f1,
1271/* 0x04d3: memx_func_enter */ 1269 0xf55d77f1,
1272 0xf100f804, 1270 0xffff73f1,
1273 0xf1162067, 1271 0xf4026eb9,
1274 0xf1f55d77, 1272 0xd8b90421,
1273 0x0487fd02,
1274 0x80f960f9,
1275 0xe0fcd0fc,
1276 0xf13321f4,
1277 0xf1fffe77,
1275 0xb9ffff73, 1278 0xb9ffff73,
1276 0x21f4026e, 1279 0x21f4026e,
1277 0x02d8b904, 1280 0x02d8b904,
1278 0xf90487fd, 1281 0xf90487fd,
1279 0xfc80f960, 1282 0xfc80f960,
1280 0xf4e0fcd0, 1283 0xf4e0fcd0,
1281 0x77f13321, 1284 0x67f13321,
1282 0x73f1fffe, 1285 0x6eb926f0,
1283 0x6eb9ffff,
1284 0x0421f402, 1286 0x0421f402,
1285 0xfd02d8b9, 1287 0xfd02d8b9,
1286 0x60f90487, 1288 0x60f90487,
1287 0xd0fc80f9, 1289 0xd0fc80f9,
1288 0x21f4e0fc, 1290 0x21f4e0fc,
1289 0xf067f133, 1291 0x0467f033,
1290 0x026eb926, 1292 0x07e007f1,
1291 0xb90421f4,
1292 0x87fd02d8,
1293 0xf960f904,
1294 0xfcd0fc80,
1295 0x3321f4e0,
1296 0xf10467f0,
1297 0xd007e007,
1298 0x04bd0006,
1299/* 0x053c: memx_func_enter_wait */
1300 0x07c067f1,
1301 0xf00066cf,
1302 0x0bf40464,
1303 0x2c67f0f6,
1304 0x800066cf,
1305 0x00f8f106,
1306/* 0x0554: memx_func_leave */
1307 0xcf2c67f0,
1308 0x06800066,
1309 0x0467f0f2,
1310 0x07e407f1,
1311 0xbd0006d0, 1293 0xbd0006d0,
1312/* 0x0569: memx_func_leave_wait */ 1294/* 0x0531: memx_func_enter_wait */
1313 0xc067f104, 1295 0xc067f104,
1314 0x0066cf07, 1296 0x0066cf07,
1315 0xf40464f0, 1297 0xf40464f0,
1316 0x67f1f61b, 1298 0x67f0f60b,
1317 0x77f126f0, 1299 0x0066cf2c,
1318 0x73f00001, 1300 0xf8f10680,
1319 0x026eb900, 1301/* 0x0549: memx_func_leave */
1320 0xb90421f4, 1302 0x2c67f000,
1321 0x87fd02d8, 1303 0x800066cf,
1322 0xf960f905, 1304 0x67f0f206,
1323 0xfcd0fc80, 1305 0xe407f104,
1324 0x3321f4e0, 1306 0x0006d007,
1325 0x162067f1, 1307/* 0x055e: memx_func_leave_wait */
1326 0xf4026eb9, 1308 0x67f104bd,
1327 0xd8b90421, 1309 0x66cf07c0,
1328 0x0587fd02, 1310 0x0464f000,
1329 0x80f960f9, 1311 0xf1f61bf4,
1330 0xe0fcd0fc, 1312 0xf126f067,
1331 0xf13321f4, 1313 0xf0000177,
1332 0xf00aa277,
1333 0x6eb90073, 1314 0x6eb90073,
1334 0x0421f402, 1315 0x0421f402,
1335 0xfd02d8b9, 1316 0xfd02d8b9,
1336 0x60f90587, 1317 0x60f90587,
1337 0xd0fc80f9, 1318 0xd0fc80f9,
1338 0x21f4e0fc, 1319 0x21f4e0fc,
1339/* 0x05d3: memx_func_wait_vblank */ 1320 0x2067f133,
1340 0xb600f833, 1321 0x026eb916,
1341 0x00f80410, 1322 0xb90421f4,
1342/* 0x05d8: memx_func_wr32 */ 1323 0x87fd02d8,
1343 0x98001698, 1324 0xf960f905,
1344 0x10b60115, 1325 0xfcd0fc80,
1345 0xf960f908,
1346 0xfcd0fc50,
1347 0x3321f4e0, 1326 0x3321f4e0,
1348 0xf40242b6, 1327 0x0aa277f1,
1349 0x00f8e91b, 1328 0xb90073f0,
1350/* 0x05f4: memx_func_wait */ 1329 0x21f4026e,
1351 0xcf2c87f0, 1330 0x02d8b904,
1352 0x1e980088, 1331 0xf90587fd,
1353 0x011d9800, 1332 0xfc80f960,
1354 0x98021c98, 1333 0xf4e0fcd0,
1355 0x10b6031b, 1334 0x00f83321,
1356 0x8621f410, 1335/* 0x05c8: memx_func_wait_vblank */
1357/* 0x060e: memx_func_delay */ 1336 0xf80410b6,
1358 0x1e9800f8, 1337/* 0x05cd: memx_func_wr32 */
1338 0x00169800,
1339 0xb6011598,
1340 0x60f90810,
1341 0xd0fc50f9,
1342 0x21f4e0fc,
1343 0x0242b633,
1344 0xf8e91bf4,
1345/* 0x05e9: memx_func_wait */
1346 0x2c87f000,
1347 0x980088cf,
1348 0x1d98001e,
1349 0x021c9801,
1350 0xb6031b98,
1351 0x21f41010,
1352/* 0x0603: memx_func_delay */
1353 0x9800f886,
1354 0x10b6001e,
1355 0x6721f404,
1356/* 0x060e: memx_func_train */
1357 0x00f800f8,
1358/* 0x0610: memx_exec */
1359 0xd0f9e0f9,
1360 0xb902c1b9,
1361/* 0x061a: memx_exec_next */
1362 0x139802b2,
1359 0x0410b600, 1363 0x0410b600,
1360 0xf86721f4, 1364 0x01f034e7,
1361/* 0x0619: memx_func_train */ 1365 0x01e033e7,
1362/* 0x061b: memx_exec */ 1366 0xf00132b6,
1363 0xf900f800, 1367 0x35980c30,
1364 0xb9d0f9e0, 1368 0xb855f9de,
1365 0xb2b902c1, 1369 0x1ef40612,
1366/* 0x0625: memx_exec_next */ 1370 0xf10b98e4,
1367 0x00139802, 1371 0xbbf20c98,
1368 0xe70410b6, 1372 0xb7f102cb,
1369 0xe701f034, 1373 0xbbcf07c4,
1370 0xb601e033, 1374 0xfcd0fc00,
1371 0x30f00132, 1375 0xe621f5e0,
1372 0xde35980c, 1376/* 0x0653: memx_info */
1373 0x12b855f9, 1377 0x7000f802,
1374 0xe41ef406, 1378 0x0bf401c6,
1375 0x98f10b98, 1379/* 0x0659: memx_info_data */
1376 0xcbbbf20c, 1380 0xccc7f10e,
1377 0xc4b7f102, 1381 0x00b7f103,
1378 0x00bbcf07, 1382 0x0b0ef408,
1379 0xe0fcd0fc, 1383/* 0x0664: memx_info_train */
1380 0x02f121f5, 1384 0x0bccc7f1,
1381/* 0x065e: memx_info */ 1385 0x0100b7f1,
1382 0xc67000f8, 1386/* 0x066c: memx_info_send */
1383 0x0e0bf401, 1387 0x02e621f5,
1384/* 0x0664: memx_info_data */ 1388/* 0x0672: memx_recv */
1385 0x03ccc7f1, 1389 0xd6b000f8,
1386 0x0800b7f1, 1390 0x9b0bf401,
1387/* 0x066f: memx_info_train */ 1391 0xf400d6b0,
1388 0xf10b0ef4, 1392 0x00f8d80b,
1389 0xf10bccc7, 1393/* 0x0680: memx_init */
1390/* 0x0677: memx_info_send */ 1394/* 0x0682: perf_recv */
1391 0xf50100b7, 1395 0x00f800f8,
1392 0xf802f121, 1396/* 0x0684: perf_init */
1393/* 0x067d: memx_recv */ 1397/* 0x0686: i2c_drive_scl */
1394 0x01d6b000, 1398 0x36b000f8,
1395 0xb09b0bf4, 1399 0x0e0bf400,
1396 0x0bf400d6, 1400 0x07e007f1,
1397/* 0x068b: memx_init */ 1401 0xbd0001d0,
1398 0xf800f8d8, 1402/* 0x0697: i2c_drive_scl_lo */
1399/* 0x068d: perf_recv */ 1403 0xf100f804,
1400/* 0x068f: perf_init */ 1404 0xd007e407,
1401 0xf800f800,
1402/* 0x0691: i2c_drive_scl */
1403 0x0036b000,
1404 0xf10e0bf4,
1405 0xd007e007,
1406 0x04bd0001, 1405 0x04bd0001,
1407/* 0x06a2: i2c_drive_scl_lo */ 1406/* 0x06a2: i2c_drive_sda */
1408 0x07f100f8, 1407 0x36b000f8,
1409 0x01d007e4, 1408 0x0e0bf400,
1410 0xf804bd00, 1409 0x07e007f1,
1411/* 0x06ad: i2c_drive_sda */ 1410 0xbd0002d0,
1412 0x0036b000, 1411/* 0x06b3: i2c_drive_sda_lo */
1413 0xf10e0bf4, 1412 0xf100f804,
1414 0xd007e007, 1413 0xd007e407,
1415 0x04bd0002, 1414 0x04bd0002,
1416/* 0x06be: i2c_drive_sda_lo */ 1415/* 0x06be: i2c_sense_scl */
1417 0x07f100f8,
1418 0x02d007e4,
1419 0xf804bd00,
1420/* 0x06c9: i2c_sense_scl */
1421 0x0132f400,
1422 0x07c437f1,
1423 0xfd0033cf,
1424 0x0bf40431,
1425 0x0131f406,
1426/* 0x06dc: i2c_sense_scl_done */
1427/* 0x06de: i2c_sense_sda */
1428 0x32f400f8, 1416 0x32f400f8,
1429 0xc437f101, 1417 0xc437f101,
1430 0x0033cf07, 1418 0x0033cf07,
1431 0xf40432fd, 1419 0xf40431fd,
1432 0x31f4060b, 1420 0x31f4060b,
1433/* 0x06f1: i2c_sense_sda_done */ 1421/* 0x06d1: i2c_sense_scl_done */
1434/* 0x06f3: i2c_raise_scl */ 1422/* 0x06d3: i2c_sense_sda */
1435 0xf900f801, 1423 0xf400f801,
1436 0x9847f140, 1424 0x37f10132,
1437 0x0137f008, 1425 0x33cf07c4,
1438 0x069121f5, 1426 0x0432fd00,
1439/* 0x0700: i2c_raise_scl_wait */ 1427 0xf4060bf4,
1440 0x03e8e7f1, 1428/* 0x06e6: i2c_sense_sda_done */
1441 0xf56721f4, 1429 0x00f80131,
1442 0xf406c921, 1430/* 0x06e8: i2c_raise_scl */
1443 0x42b60901, 1431 0x47f140f9,
1444 0xef1bf401, 1432 0x37f00898,
1445/* 0x0714: i2c_raise_scl_done */ 1433 0x8621f501,
1446 0x00f840fc, 1434/* 0x06f5: i2c_raise_scl_wait */
1447/* 0x0718: i2c_start */ 1435 0xe8e7f106,
1448 0x06c921f5, 1436 0x6721f403,
1449 0xf50d11f4, 1437 0x06be21f5,
1450 0xf406de21, 1438 0xb60901f4,
1451 0x0ef40611, 1439 0x1bf40142,
1452/* 0x0729: i2c_start_rep */ 1440/* 0x0709: i2c_raise_scl_done */
1453 0x0037f030, 1441 0xf840fcef,
1454 0x069121f5, 1442/* 0x070d: i2c_start */
1443 0xbe21f500,
1444 0x0d11f406,
1445 0x06d321f5,
1446 0xf40611f4,
1447/* 0x071e: i2c_start_rep */
1448 0x37f0300e,
1449 0x8621f500,
1450 0x0137f006,
1451 0x06a221f5,
1452 0xb60076bb,
1453 0x50f90465,
1454 0xbb046594,
1455 0x50bd0256,
1456 0xfc0475fd,
1457 0xe821f550,
1458 0x0464b606,
1459/* 0x074b: i2c_start_send */
1460 0xf01f11f4,
1461 0x21f50037,
1462 0xe7f106a2,
1463 0x21f41388,
1464 0x0037f067,
1465 0x068621f5,
1466 0x1388e7f1,
1467/* 0x0767: i2c_start_out */
1468 0xf86721f4,
1469/* 0x0769: i2c_stop */
1470 0x0037f000,
1471 0x068621f5,
1472 0xf50037f0,
1473 0xf106a221,
1474 0xf403e8e7,
1475 0x37f06721,
1476 0x8621f501,
1477 0x88e7f106,
1478 0x6721f413,
1455 0xf50137f0, 1479 0xf50137f0,
1456 0xbb06ad21, 1480 0xf106a221,
1481 0xf41388e7,
1482 0x00f86721,
1483/* 0x079c: i2c_bitw */
1484 0x06a221f5,
1485 0x03e8e7f1,
1486 0xbb6721f4,
1457 0x65b60076, 1487 0x65b60076,
1458 0x9450f904, 1488 0x9450f904,
1459 0x56bb0465, 1489 0x56bb0465,
1460 0xfd50bd02, 1490 0xfd50bd02,
1461 0x50fc0475, 1491 0x50fc0475,
1462 0x06f321f5, 1492 0x06e821f5,
1463 0xf40464b6, 1493 0xf40464b6,
1464/* 0x0756: i2c_start_send */ 1494 0xe7f11811,
1465 0x37f01f11,
1466 0xad21f500,
1467 0x88e7f106,
1468 0x6721f413,
1469 0xf50037f0,
1470 0xf1069121,
1471 0xf41388e7,
1472/* 0x0772: i2c_start_out */
1473 0x00f86721,
1474/* 0x0774: i2c_stop */
1475 0xf50037f0,
1476 0xf0069121,
1477 0x21f50037,
1478 0xe7f106ad,
1479 0x21f403e8,
1480 0x0137f067,
1481 0x069121f5,
1482 0x1388e7f1,
1483 0xf06721f4,
1484 0x21f50137,
1485 0xe7f106ad,
1486 0x21f41388, 1495 0x21f41388,
1487/* 0x07a7: i2c_bitw */ 1496 0x0037f067,
1488 0xf500f867, 1497 0x068621f5,
1489 0xf106ad21,
1490 0xf403e8e7,
1491 0x76bb6721,
1492 0x0465b600,
1493 0x659450f9,
1494 0x0256bb04,
1495 0x75fd50bd,
1496 0xf550fc04,
1497 0xb606f321,
1498 0x11f40464,
1499 0x88e7f118,
1500 0x6721f413,
1501 0xf50037f0,
1502 0xf1069121,
1503 0xf41388e7,
1504/* 0x07e6: i2c_bitw_out */
1505 0x00f86721,
1506/* 0x07e8: i2c_bitr */
1507 0xf50137f0,
1508 0xf106ad21,
1509 0xf403e8e7,
1510 0x76bb6721,
1511 0x0465b600,
1512 0x659450f9,
1513 0x0256bb04,
1514 0x75fd50bd,
1515 0xf550fc04,
1516 0xb606f321,
1517 0x11f40464,
1518 0xde21f51b,
1519 0x0037f006,
1520 0x069121f5,
1521 0x1388e7f1, 1498 0x1388e7f1,
1522 0xf06721f4, 1499/* 0x07db: i2c_bitw_out */
1523 0x31f4013c, 1500 0xf86721f4,
1524/* 0x082d: i2c_bitr_done */ 1501/* 0x07dd: i2c_bitr */
1525/* 0x082f: i2c_get_byte */ 1502 0x0137f000,
1526 0xf000f801, 1503 0x06a221f5,
1527 0x47f00057, 1504 0x03e8e7f1,
1528/* 0x0835: i2c_get_byte_next */ 1505 0xbb6721f4,
1529 0x0154b608, 1506 0x65b60076,
1507 0x9450f904,
1508 0x56bb0465,
1509 0xfd50bd02,
1510 0x50fc0475,
1511 0x06e821f5,
1512 0xf40464b6,
1513 0x21f51b11,
1514 0x37f006d3,
1515 0x8621f500,
1516 0x88e7f106,
1517 0x6721f413,
1518 0xf4013cf0,
1519/* 0x0822: i2c_bitr_done */
1520 0x00f80131,
1521/* 0x0824: i2c_get_byte */
1522 0xf00057f0,
1523/* 0x082a: i2c_get_byte_next */
1524 0x54b60847,
1525 0x0076bb01,
1526 0xf90465b6,
1527 0x04659450,
1528 0xbd0256bb,
1529 0x0475fd50,
1530 0x21f550fc,
1531 0x64b607dd,
1532 0x2b11f404,
1533 0xb60553fd,
1534 0x1bf40142,
1535 0x0137f0d8,
1530 0xb60076bb, 1536 0xb60076bb,
1531 0x50f90465, 1537 0x50f90465,
1532 0xbb046594, 1538 0xbb046594,
1533 0x50bd0256, 1539 0x50bd0256,
1534 0xfc0475fd, 1540 0xfc0475fd,
1535 0xe821f550, 1541 0x9c21f550,
1536 0x0464b607, 1542 0x0464b607,
1537 0xfd2b11f4, 1543/* 0x0874: i2c_get_byte_done */
1538 0x42b60553, 1544/* 0x0876: i2c_put_byte */
1539 0xd81bf401, 1545 0x47f000f8,
1540 0xbb0137f0, 1546/* 0x0879: i2c_put_byte_next */
1547 0x0142b608,
1548 0xbb3854ff,
1541 0x65b60076, 1549 0x65b60076,
1542 0x9450f904, 1550 0x9450f904,
1543 0x56bb0465, 1551 0x56bb0465,
1544 0xfd50bd02, 1552 0xfd50bd02,
1545 0x50fc0475, 1553 0x50fc0475,
1546 0x07a721f5, 1554 0x079c21f5,
1547/* 0x087f: i2c_get_byte_done */
1548 0xf80464b6,
1549/* 0x0881: i2c_put_byte */
1550 0x0847f000,
1551/* 0x0884: i2c_put_byte_next */
1552 0xff0142b6,
1553 0x76bb3854,
1554 0x0465b600,
1555 0x659450f9,
1556 0x0256bb04,
1557 0x75fd50bd,
1558 0xf550fc04,
1559 0xb607a721,
1560 0x11f40464,
1561 0x0046b034,
1562 0xbbd81bf4,
1563 0x65b60076,
1564 0x9450f904,
1565 0x56bb0465,
1566 0xfd50bd02,
1567 0x50fc0475,
1568 0x07e821f5,
1569 0xf40464b6, 1555 0xf40464b6,
1570 0x76bb0f11, 1556 0x46b03411,
1571 0x0136b000, 1557 0xd81bf400,
1572 0xf4061bf4,
1573/* 0x08da: i2c_put_byte_done */
1574 0x00f80132,
1575/* 0x08dc: i2c_addr */
1576 0xb60076bb, 1558 0xb60076bb,
1577 0x50f90465, 1559 0x50f90465,
1578 0xbb046594, 1560 0xbb046594,
1579 0x50bd0256, 1561 0x50bd0256,
1580 0xfc0475fd, 1562 0xfc0475fd,
1581 0x1821f550, 1563 0xdd21f550,
1582 0x0464b607, 1564 0x0464b607,
1583 0xe72911f4, 1565 0xbb0f11f4,
1584 0xb6012ec3, 1566 0x36b00076,
1585 0x53fd0134, 1567 0x061bf401,
1586 0x0076bb05, 1568/* 0x08cf: i2c_put_byte_done */
1569 0xf80132f4,
1570/* 0x08d1: i2c_addr */
1571 0x0076bb00,
1587 0xf90465b6, 1572 0xf90465b6,
1588 0x04659450, 1573 0x04659450,
1589 0xbd0256bb, 1574 0xbd0256bb,
1590 0x0475fd50, 1575 0x0475fd50,
1591 0x21f550fc, 1576 0x21f550fc,
1592 0x64b60881, 1577 0x64b6070d,
1593/* 0x0921: i2c_addr_done */ 1578 0x2911f404,
1594/* 0x0923: i2c_acquire_addr */ 1579 0x012ec3e7,
1595 0xc700f804, 1580 0xfd0134b6,
1596 0xe4b6f8ce, 1581 0x76bb0553,
1597 0x14e0b705, 1582 0x0465b600,
1598/* 0x092f: i2c_acquire */ 1583 0x659450f9,
1599 0xf500f8d0, 1584 0x0256bb04,
1600 0xf4092321, 1585 0x75fd50bd,
1601 0xd9f00421, 1586 0xf550fc04,
1587 0xb6087621,
1588/* 0x0916: i2c_addr_done */
1589 0x00f80464,
1590/* 0x0918: i2c_acquire_addr */
1591 0xb6f8cec7,
1592 0xe0b705e4,
1593 0x00f8d014,
1594/* 0x0924: i2c_acquire */
1595 0x091821f5,
1596 0xf00421f4,
1597 0x21f403d9,
1598/* 0x0933: i2c_release */
1599 0xf500f833,
1600 0xf4091821,
1601 0xdaf00421,
1602 0x3321f403, 1602 0x3321f403,
1603/* 0x093e: i2c_release */ 1603/* 0x0942: i2c_recv */
1604 0x21f500f8, 1604 0x32f400f8,
1605 0x21f40923, 1605 0xf8c1c701,
1606 0x03daf004, 1606 0xb00214b6,
1607 0xf83321f4, 1607 0x1ff52816,
1608/* 0x094d: i2c_recv */ 1608 0x13a0013a,
1609 0x0132f400, 1609 0x32980cf4,
1610 0xb6f8c1c7, 1610 0xcc13a000,
1611 0x16b00214, 1611 0x0031980c,
1612 0x3a1ff528, 1612 0xf90231f4,
1613 0xf413a001, 1613 0xf9e0f9d0,
1614 0x0032980c, 1614 0x0067f1d0,
1615 0x0ccc13a0, 1615 0x0063f100,
1616 0xf4003198, 1616 0x01679210,
1617 0xd0f90231, 1617 0xb60076bb,
1618 0xd0f9e0f9, 1618 0x50f90465,
1619 0x000067f1, 1619 0xbb046594,
1620 0x100063f1, 1620 0x50bd0256,
1621 0xbb016792, 1621 0xfc0475fd,
1622 0x65b60076, 1622 0x2421f550,
1623 0x9450f904, 1623 0x0464b609,
1624 0x56bb0465, 1624 0xd6b0d0fc,
1625 0xfd50bd02, 1625 0xb31bf500,
1626 0x50fc0475, 1626 0x0057f000,
1627 0x092f21f5, 1627 0xb60076bb,
1628 0xfc0464b6, 1628 0x50f90465,
1629 0x00d6b0d0, 1629 0xbb046594,
1630 0x00b31bf5, 1630 0x50bd0256,
1631 0xbb0057f0, 1631 0xfc0475fd,
1632 0xd121f550,
1633 0x0464b608,
1634 0x00d011f5,
1635 0xbbe0c5c7,
1632 0x65b60076, 1636 0x65b60076,
1633 0x9450f904, 1637 0x9450f904,
1634 0x56bb0465, 1638 0x56bb0465,
1635 0xfd50bd02, 1639 0xfd50bd02,
1636 0x50fc0475, 1640 0x50fc0475,
1637 0x08dc21f5, 1641 0x087621f5,
1638 0xf50464b6, 1642 0xf50464b6,
1639 0xc700d011, 1643 0xf000ad11,
1640 0x76bbe0c5, 1644 0x76bb0157,
1641 0x0465b600, 1645 0x0465b600,
1642 0x659450f9, 1646 0x659450f9,
1643 0x0256bb04, 1647 0x0256bb04,
1644 0x75fd50bd, 1648 0x75fd50bd,
1645 0xf550fc04, 1649 0xf550fc04,
1646 0xb6088121, 1650 0xb608d121,
1647 0x11f50464, 1651 0x11f50464,
1648 0x57f000ad, 1652 0x76bb008a,
1649 0x0076bb01, 1653 0x0465b600,
1650 0xf90465b6, 1654 0x659450f9,
1651 0x04659450, 1655 0x0256bb04,
1652 0xbd0256bb, 1656 0x75fd50bd,
1653 0x0475fd50, 1657 0xf550fc04,
1654 0x21f550fc, 1658 0xb6082421,
1655 0x64b608dc, 1659 0x11f40464,
1656 0x8a11f504, 1660 0xe05bcb6a,
1657 0x0076bb00, 1661 0xb60076bb,
1658 0xf90465b6, 1662 0x50f90465,
1659 0x04659450, 1663 0xbb046594,
1660 0xbd0256bb, 1664 0x50bd0256,
1661 0x0475fd50, 1665 0xfc0475fd,
1662 0x21f550fc, 1666 0x6921f550,
1663 0x64b6082f, 1667 0x0464b607,
1664 0x6a11f404, 1668 0xbd025bb9,
1665 0xbbe05bcb, 1669 0x430ef474,
1666 0x65b60076, 1670/* 0x0a48: i2c_recv_not_rd08 */
1667 0x9450f904, 1671 0xf401d6b0,
1668 0x56bb0465, 1672 0x57f03d1b,
1669 0xfd50bd02, 1673 0xd121f500,
1670 0x50fc0475, 1674 0x3311f408,
1671 0x077421f5, 1675 0xf5e0c5c7,
1672 0xb90464b6, 1676 0xf4087621,
1673 0x74bd025b, 1677 0x57f02911,
1674/* 0x0a53: i2c_recv_not_rd08 */ 1678 0xd121f500,
1675 0xb0430ef4, 1679 0x1f11f408,
1676 0x1bf401d6, 1680 0xf5e0b5c7,
1677 0x0057f03d, 1681 0xf4087621,
1678 0x08dc21f5, 1682 0x21f51511,
1679 0xc73311f4, 1683 0x74bd0769,
1680 0x21f5e0c5, 1684 0xf408c5c7,
1681 0x11f40881, 1685 0x32f4091b,
1682 0x0057f029, 1686 0x030ef402,
1683 0x08dc21f5, 1687/* 0x0a88: i2c_recv_not_wr08 */
1684 0xc71f11f4, 1688/* 0x0a88: i2c_recv_done */
1685 0x21f5e0b5, 1689 0xf5f8cec7,
1686 0x11f40881, 1690 0xfc093321,
1687 0x7421f515, 1691 0xf4d0fce0,
1688 0xc774bd07, 1692 0x7cb90a12,
1689 0x1bf408c5, 1693 0xe621f502,
1690 0x0232f409, 1694/* 0x0a9d: i2c_recv_exit */
1691/* 0x0a93: i2c_recv_not_wr08 */ 1695/* 0x0a9f: i2c_init */
1692/* 0x0a93: i2c_recv_done */
1693 0xc7030ef4,
1694 0x21f5f8ce,
1695 0xe0fc093e,
1696 0x12f4d0fc,
1697 0x027cb90a,
1698 0x02f121f5,
1699/* 0x0aa8: i2c_recv_exit */
1700/* 0x0aaa: i2c_init */
1701 0x00f800f8,
1702/* 0x0aac: test_recv */
1703 0x05d817f1,
1704 0xb60011cf,
1705 0x07f10110,
1706 0x01d005d8,
1707 0xf104bd00,
1708 0xf1d900e7,
1709 0xf5134fe3,
1710 0xf8022321,
1711/* 0x0acd: test_init */
1712 0x00e7f100,
1713 0x2321f508,
1714/* 0x0ad7: idle_recv */
1715 0xf800f802, 1696 0xf800f802,
1716/* 0x0ad9: idle */ 1697/* 0x0aa1: test_recv */
1717 0x0031f400, 1698 0xd817f100,
1718 0x05d417f1, 1699 0x0011cf05,
1719 0xb60011cf, 1700 0xf10110b6,
1720 0x07f10110, 1701 0xd005d807,
1721 0x01d005d4, 1702 0x04bd0001,
1722/* 0x0aef: idle_loop */ 1703 0xd900e7f1,
1723 0xf004bd00, 1704 0x134fe3f1,
1724 0x32f45817, 1705 0x021821f5,
1725/* 0x0af5: idle_proc */ 1706/* 0x0ac2: test_init */
1726/* 0x0af5: idle_proc_exec */ 1707 0xe7f100f8,
1727 0xb910f902, 1708 0x21f50800,
1728 0x21f5021e, 1709 0x00f80218,
1729 0x10fc02fa, 1710/* 0x0acc: idle_recv */
1730 0xf40911f4, 1711/* 0x0ace: idle */
1731 0x0ef40231, 1712 0x31f400f8,
1732/* 0x0b09: idle_proc_next */ 1713 0xd417f100,
1733 0x5810b6ef, 1714 0x0011cf05,
1734 0xf4061fb8, 1715 0xf10110b6,
1735 0x02f4e61b, 1716 0xd005d407,
1736 0x0028f4dd, 1717 0x04bd0001,
1737 0x00c10ef4, 1718/* 0x0ae4: idle_loop */
1719 0xf45817f0,
1720/* 0x0aea: idle_proc */
1721/* 0x0aea: idle_proc_exec */
1722 0x10f90232,
1723 0xf5021eb9,
1724 0xfc02ef21,
1725 0x0911f410,
1726 0xf40231f4,
1727/* 0x0afe: idle_proc_next */
1728 0x10b6ef0e,
1729 0x061fb858,
1730 0xf4e61bf4,
1731 0x28f4dd02,
1732 0xc10ef400,
1733 0x00000000,
1734 0x00000000,
1735 0x00000000,
1738 0x00000000, 1736 0x00000000,
1739 0x00000000, 1737 0x00000000,
1740 0x00000000, 1738 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index fe4f63deeaab..8a2b628642ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -24,8 +24,8 @@ uint32_t gk208_pmu_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x00000453, 27 0x00000447,
28 0x00000404, 28 0x000003f8,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t gk208_pmu_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000062d, 49 0x00000621,
50 0x0000061f, 50 0x00000613,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t gk208_pmu_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000631, 71 0x00000625,
72 0x0000062f, 72 0x00000623,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t gk208_pmu_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000a35, 93 0x00000a29,
94 0x000008dc, 94 0x000008d0,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t gk208_pmu_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000a56, 115 0x00000a4a,
116 0x00000a37, 116 0x00000a2b,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t gk208_pmu_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000a61, 137 0x00000a55,
138 0x00000a5f, 138 0x00000a53,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -229,26 +229,26 @@ uint32_t gk208_pmu_data[] = {
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00000001, 230 0x00000001,
231 0x00000000, 231 0x00000000,
232 0x00000483, 232 0x00000477,
233/* 0x037c: memx_func_next */ 233/* 0x037c: memx_func_next */
234 0x00000002, 234 0x00000002,
235 0x00000000, 235 0x00000000,
236 0x00000500, 236 0x000004f4,
237 0x00000003, 237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x00000580, 239 0x00000574,
240 0x00040004, 240 0x00040004,
241 0x00000000, 241 0x00000000,
242 0x0000059d, 242 0x00000591,
243 0x00010005, 243 0x00010005,
244 0x00000000, 244 0x00000000,
245 0x000005b7, 245 0x000005ab,
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x0000057b, 248 0x0000056f,
249 0x00000007, 249 0x00000007,
250 0x00000000, 250 0x00000000,
251 0x000005c3, 251 0x000005b7,
252/* 0x03c4: memx_func_tail */ 252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */ 253/* 0x03c4: memx_ts_start */
254 0x00000000, 254 0x00000000,
@@ -916,7 +916,7 @@ uint32_t gk208_pmu_data[] = {
916}; 916};
917 917
918uint32_t gk208_pmu_code[] = { 918uint32_t gk208_pmu_code[] = {
919 0x031c0ef5, 919 0x03100ef5,
920/* 0x0004: rd32 */ 920/* 0x0004: rd32 */
921 0xf607a040, 921 0xf607a040,
922 0x04bd000e, 922 0x04bd000e,
@@ -972,7 +972,7 @@ uint32_t gk208_pmu_code[] = {
972 0x0a98280b, 972 0x0a98280b,
973 0x029abb9a, 973 0x029abb9a,
974 0x0d0e1cf4, 974 0x0d0e1cf4,
975 0x02617e01, 975 0x02557e01,
976 0xf494bd00, 976 0xf494bd00,
977/* 0x00c2: intr_watchdog_next_time */ 977/* 0x00c2: intr_watchdog_next_time */
978 0x0a98140e, 978 0x0a98140e,
@@ -1017,21 +1017,16 @@ uint32_t gk208_pmu_code[] = {
1017 0xc0f900cc, 1017 0xc0f900cc,
1018 0xf14f484e, 1018 0xf14f484e,
1019 0x0d5453e3, 1019 0x0d5453e3,
1020 0x02c27e00, 1020 0x02b67e00,
1021 0x40c0fc00, 1021 0x40c0fc00,
1022 0x0cf604c0, 1022 0x0cf604c0,
1023/* 0x0167: intr_subintr_skip_fifo */ 1023/* 0x0167: intr_subintr_skip_fifo */
1024 0x4004bd00, 1024 0x4004bd00,
1025 0x09f60688, 1025 0x09f60688,
1026/* 0x016f: intr_skip_subintr */ 1026/* 0x016f: intr_skip_subintr */
1027 0xc404bd00, 1027 0x4904bd00,
1028 0x0bf42089, 1028 0x90bd00e0,
1029 0xbfa4f107, 1029 0x000489fd,
1030/* 0x0179: intr_skip_pause */
1031 0x4089c4ff,
1032 0xf1070bf4,
1033/* 0x0183: intr_skip_user0 */
1034 0x00ffbfa4,
1035 0x0008f604, 1030 0x0008f604,
1036 0x80fc04bd, 1031 0x80fc04bd,
1037 0xfc0088fe, 1032 0xfc0088fe,
@@ -1040,35 +1035,35 @@ uint32_t gk208_pmu_code[] = {
1040 0xfca0fcb0, 1035 0xfca0fcb0,
1041 0xfc80fc90, 1036 0xfc80fc90,
1042 0x0032f400, 1037 0x0032f400,
1043/* 0x01a6: ticks_from_ns */ 1038/* 0x019a: ticks_from_ns */
1044 0xc0f901f8, 1039 0xc0f901f8,
1045 0xd7f1b0f9, 1040 0xd7f1b0f9,
1046 0xd3f00144, 1041 0xd3f00144,
1047 0x7721f500, 1042 0x6b21f500,
1048 0xe8ccec03, 1043 0xe8ccec03,
1049 0x00b4b003, 1044 0x00b4b003,
1050 0xec120bf4, 1045 0xec120bf4,
1051 0xf103e8ee, 1046 0xf103e8ee,
1052 0xf00144d7, 1047 0xf00144d7,
1053 0x21f500d3, 1048 0x21f500d3,
1054/* 0x01ce: ticks_from_ns_quit */ 1049/* 0x01c2: ticks_from_ns_quit */
1055 0xceb20377, 1050 0xceb2036b,
1056 0xc0fcb0fc, 1051 0xc0fcb0fc,
1057/* 0x01d6: ticks_from_us */ 1052/* 0x01ca: ticks_from_us */
1058 0xc0f900f8, 1053 0xc0f900f8,
1059 0xd7f1b0f9, 1054 0xd7f1b0f9,
1060 0xd3f00144, 1055 0xd3f00144,
1061 0x7721f500, 1056 0x6b21f500,
1062 0xb0ceb203, 1057 0xb0ceb203,
1063 0x0bf400b4, 1058 0x0bf400b4,
1064/* 0x01ef: ticks_from_us_quit */ 1059/* 0x01e3: ticks_from_us_quit */
1065 0xfce4bd05, 1060 0xfce4bd05,
1066 0xf8c0fcb0, 1061 0xf8c0fcb0,
1067/* 0x01f5: ticks_to_us */ 1062/* 0x01e9: ticks_to_us */
1068 0x44d7f100, 1063 0x44d7f100,
1069 0x00d3f001, 1064 0x00d3f001,
1070 0xf8ecedff, 1065 0xf8ecedff,
1071/* 0x0201: timer */ 1066/* 0x01f5: timer */
1072 0xf990f900, 1067 0xf990f900,
1073 0x1032f480, 1068 0x1032f480,
1074 0xb003f898, 1069 0xb003f898,
@@ -1086,17 +1081,17 @@ uint32_t gk208_pmu_code[] = {
1086 0xa60088cf, 1081 0xa60088cf,
1087 0x080bf4e0, 1082 0x080bf4e0,
1088 0x1cf4e8a6, 1083 0x1cf4e8a6,
1089/* 0x0245: timer_reset */ 1084/* 0x0239: timer_reset */
1090 0xf634000d, 1085 0xf634000d,
1091 0x04bd000e, 1086 0x04bd000e,
1092/* 0x024f: timer_enable */ 1087/* 0x0243: timer_enable */
1093 0x089a0eb5, 1088 0x089a0eb5,
1094 0xf6380001, 1089 0xf6380001,
1095 0x04bd0008, 1090 0x04bd0008,
1096/* 0x0258: timer_done */ 1091/* 0x024c: timer_done */
1097 0xfc1031f4, 1092 0xfc1031f4,
1098 0xf890fc80, 1093 0xf890fc80,
1099/* 0x0261: send_proc */ 1094/* 0x0255: send_proc */
1100 0xf980f900, 1095 0xf980f900,
1101 0x05e89890, 1096 0x05e89890,
1102 0xf004e998, 1097 0xf004e998,
@@ -1111,24 +1106,24 @@ uint32_t gk208_pmu_code[] = {
1111 0x90b6038b, 1106 0x90b6038b,
1112 0x0794f001, 1107 0x0794f001,
1113 0xf404e9b5, 1108 0xf404e9b5,
1114/* 0x029a: send_done */ 1109/* 0x028e: send_done */
1115 0x90fc0231, 1110 0x90fc0231,
1116 0x00f880fc, 1111 0x00f880fc,
1117/* 0x02a0: find */ 1112/* 0x0294: find */
1118 0x580880f9, 1113 0x580880f9,
1119/* 0x02a7: find_loop */ 1114/* 0x029b: find_loop */
1120 0x980131f4, 1115 0x980131f4,
1121 0xaea6008a, 1116 0xaea6008a,
1122 0xb6100bf4, 1117 0xb6100bf4,
1123 0x86b15880, 1118 0x86b15880,
1124 0x1bf40268, 1119 0x1bf40268,
1125 0x0132f4f1, 1120 0x0132f4f1,
1126/* 0x02bc: find_done */ 1121/* 0x02b0: find_done */
1127 0x80fc8eb2, 1122 0x80fc8eb2,
1128/* 0x02c2: send */ 1123/* 0x02b6: send */
1129 0xa07e00f8, 1124 0x947e00f8,
1130 0x01f40002, 1125 0x01f40002,
1131/* 0x02cb: recv */ 1126/* 0x02bf: recv */
1132 0xf900f89b, 1127 0xf900f89b,
1133 0x9880f990, 1128 0x9880f990,
1134 0xe99805e8, 1129 0xe99805e8,
@@ -1148,10 +1143,10 @@ uint32_t gk208_pmu_code[] = {
1148 0xa5f900ee, 1143 0xa5f900ee,
1149 0xf8fef0fc, 1144 0xf8fef0fc,
1150 0x0131f400, 1145 0x0131f400,
1151/* 0x0316: recv_done */ 1146/* 0x030a: recv_done */
1152 0x80fcf0fc, 1147 0x80fcf0fc,
1153 0x00f890fc, 1148 0x00f890fc,
1154/* 0x031c: init */ 1149/* 0x0310: init */
1155 0xcf010841, 1150 0xcf010841,
1156 0x11e70011, 1151 0x11e70011,
1157 0x14b60109, 1152 0x14b60109,
@@ -1170,12 +1165,12 @@ uint32_t gk208_pmu_code[] = {
1170 0x011031f4, 1165 0x011031f4,
1171 0xf6380001, 1166 0xf6380001,
1172 0x04bd0001, 1167 0x04bd0001,
1173/* 0x0366: init_proc */ 1168/* 0x035a: init_proc */
1174 0xf198580f, 1169 0xf198580f,
1175 0x0016b001, 1170 0x0016b001,
1176 0xf9fa0bf4, 1171 0xf9fa0bf4,
1177 0x58f0b615, 1172 0x58f0b615,
1178/* 0x0377: mulu32_32_64 */ 1173/* 0x036b: mulu32_32_64 */
1179 0xf9f20ef4, 1174 0xf9f20ef4,
1180 0xf920f910, 1175 0xf920f910,
1181 0x9540f930, 1176 0x9540f930,
@@ -1196,7 +1191,7 @@ uint32_t gk208_pmu_code[] = {
1196 0x00b3bb30, 1191 0x00b3bb30,
1197 0x30fc40fc, 1192 0x30fc40fc,
1198 0x10fc20fc, 1193 0x10fc20fc,
1199/* 0x03c6: host_send */ 1194/* 0x03ba: host_send */
1200 0xb04100f8, 1195 0xb04100f8,
1201 0x0011cf04, 1196 0x0011cf04,
1202 0xcf04a042, 1197 0xcf04a042,
@@ -1207,18 +1202,18 @@ uint32_t gk208_pmu_code[] = {
1207 0x03eb9802, 1202 0x03eb9802,
1208 0x9802ec98, 1203 0x9802ec98,
1209 0xee9801ed, 1204 0xee9801ed,
1210 0x02c27e00, 1205 0x02b67e00,
1211 0x0110b600, 1206 0x0110b600,
1212 0x400f1ec4, 1207 0x400f1ec4,
1213 0x0ef604b0, 1208 0x0ef604b0,
1214 0xf404bd00, 1209 0xf404bd00,
1215/* 0x0402: host_send_done */ 1210/* 0x03f6: host_send_done */
1216 0x00f8c70e, 1211 0x00f8c70e,
1217/* 0x0404: host_recv */ 1212/* 0x03f8: host_recv */
1218 0xf14e4941, 1213 0xf14e4941,
1219 0xa6525413, 1214 0xa6525413,
1220 0xb90bf4e1, 1215 0xb90bf4e1,
1221/* 0x0410: host_recv_wait */ 1216/* 0x0404: host_recv_wait */
1222 0xcf04cc41, 1217 0xcf04cc41,
1223 0xc8420011, 1218 0xc8420011,
1224 0x0022cf04, 1219 0x0022cf04,
@@ -1235,7 +1230,7 @@ uint32_t gk208_pmu_code[] = {
1235 0x04bd0002, 1230 0x04bd0002,
1236 0x00004002, 1231 0x00004002,
1237 0xbd0002f6, 1232 0xbd0002f6,
1238/* 0x0453: host_init */ 1233/* 0x0447: host_init */
1239 0x4100f804, 1234 0x4100f804,
1240 0x14b60080, 1235 0x14b60080,
1241 0x7015f110, 1236 0x7015f110,
@@ -1248,7 +1243,7 @@ uint32_t gk208_pmu_code[] = {
1248 0x0104bd00, 1243 0x0104bd00,
1249 0x04c44001, 1244 0x04c44001,
1250 0xbd0001f6, 1245 0xbd0001f6,
1251/* 0x0483: memx_func_enter */ 1246/* 0x0477: memx_func_enter */
1252 0xf100f804, 1247 0xf100f804,
1253 0xf1162067, 1248 0xf1162067,
1254 0xf1f55d77, 1249 0xf1f55d77,
@@ -1275,19 +1270,19 @@ uint32_t gk208_pmu_code[] = {
1275 0x00002e7e, 1270 0x00002e7e,
1276 0xe0400406, 1271 0xe0400406,
1277 0x0006f607, 1272 0x0006f607,
1278/* 0x04ea: memx_func_enter_wait */ 1273/* 0x04de: memx_func_enter_wait */
1279 0xc04604bd, 1274 0xc04604bd,
1280 0x0066cf07, 1275 0x0066cf07,
1281 0xf40464f0, 1276 0xf40464f0,
1282 0x2c06f70b, 1277 0x2c06f70b,
1283 0xb50066cf, 1278 0xb50066cf,
1284 0x00f8f106, 1279 0x00f8f106,
1285/* 0x0500: memx_func_leave */ 1280/* 0x04f4: memx_func_leave */
1286 0x66cf2c06, 1281 0x66cf2c06,
1287 0xf206b500, 1282 0xf206b500,
1288 0xe4400406, 1283 0xe4400406,
1289 0x0006f607, 1284 0x0006f607,
1290/* 0x0512: memx_func_leave_wait */ 1285/* 0x0506: memx_func_leave_wait */
1291 0xc04604bd, 1286 0xc04604bd,
1292 0x0066cf07, 1287 0x0066cf07,
1293 0xf40464f0, 1288 0xf40464f0,
@@ -1314,10 +1309,10 @@ uint32_t gk208_pmu_code[] = {
1314 0xf960f905, 1309 0xf960f905,
1315 0xfcd0fc80, 1310 0xfcd0fc80,
1316 0x002e7ee0, 1311 0x002e7ee0,
1317/* 0x057b: memx_func_wait_vblank */ 1312/* 0x056f: memx_func_wait_vblank */
1318 0xb600f800, 1313 0xb600f800,
1319 0x00f80410, 1314 0x00f80410,
1320/* 0x0580: memx_func_wr32 */ 1315/* 0x0574: memx_func_wr32 */
1321 0x98001698, 1316 0x98001698,
1322 0x10b60115, 1317 0x10b60115,
1323 0xf960f908, 1318 0xf960f908,
@@ -1325,23 +1320,23 @@ uint32_t gk208_pmu_code[] = {
1325 0x002e7ee0, 1320 0x002e7ee0,
1326 0x0242b600, 1321 0x0242b600,
1327 0xf8e81bf4, 1322 0xf8e81bf4,
1328/* 0x059d: memx_func_wait */ 1323/* 0x0591: memx_func_wait */
1329 0xcf2c0800, 1324 0xcf2c0800,
1330 0x1e980088, 1325 0x1e980088,
1331 0x011d9800, 1326 0x011d9800,
1332 0x98021c98, 1327 0x98021c98,
1333 0x10b6031b, 1328 0x10b6031b,
1334 0x00797e10, 1329 0x00797e10,
1335/* 0x05b7: memx_func_delay */ 1330/* 0x05ab: memx_func_delay */
1336 0x9800f800, 1331 0x9800f800,
1337 0x10b6001e, 1332 0x10b6001e,
1338 0x005d7e04, 1333 0x005d7e04,
1339/* 0x05c3: memx_func_train */ 1334/* 0x05b7: memx_func_train */
1340 0xf800f800, 1335 0xf800f800,
1341/* 0x05c5: memx_exec */ 1336/* 0x05b9: memx_exec */
1342 0xf9e0f900, 1337 0xf9e0f900,
1343 0xb2c1b2d0, 1338 0xb2c1b2d0,
1344/* 0x05cd: memx_exec_next */ 1339/* 0x05c1: memx_exec_next */
1345 0x001398b2, 1340 0x001398b2,
1346 0xe70410b6, 1341 0xe70410b6,
1347 0xe701f034, 1342 0xe701f034,
@@ -1354,111 +1349,111 @@ uint32_t gk208_pmu_code[] = {
1354 0x02cbbbf2, 1349 0x02cbbbf2,
1355 0xcf07c44b, 1350 0xcf07c44b,
1356 0xd0fc00bb, 1351 0xd0fc00bb,
1357 0xc27ee0fc, 1352 0xb67ee0fc,
1358 0x00f80002, 1353 0x00f80002,
1359/* 0x0604: memx_info */ 1354/* 0x05f8: memx_info */
1360 0xf401c670, 1355 0xf401c670,
1361/* 0x060a: memx_info_data */ 1356/* 0x05fe: memx_info_data */
1362 0xcc4c0c0b, 1357 0xcc4c0c0b,
1363 0x08004b03, 1358 0x08004b03,
1364/* 0x0613: memx_info_train */ 1359/* 0x0607: memx_info_train */
1365 0x4c090ef4, 1360 0x4c090ef4,
1366 0x004b0bcc, 1361 0x004b0bcc,
1367/* 0x0619: memx_info_send */ 1362/* 0x060d: memx_info_send */
1368 0x02c27e01, 1363 0x02b67e01,
1369/* 0x061f: memx_recv */ 1364/* 0x0613: memx_recv */
1370 0xb000f800, 1365 0xb000f800,
1371 0x0bf401d6, 1366 0x0bf401d6,
1372 0x00d6b0a3, 1367 0x00d6b0a3,
1373 0xf8dc0bf4, 1368 0xf8dc0bf4,
1374/* 0x062d: memx_init */ 1369/* 0x0621: memx_init */
1375/* 0x062f: perf_recv */ 1370/* 0x0623: perf_recv */
1376 0xf800f800, 1371 0xf800f800,
1377/* 0x0631: perf_init */ 1372/* 0x0625: perf_init */
1378/* 0x0633: i2c_drive_scl */ 1373/* 0x0627: i2c_drive_scl */
1379 0xb000f800, 1374 0xb000f800,
1380 0x0bf40036, 1375 0x0bf40036,
1381 0x07e0400d, 1376 0x07e0400d,
1382 0xbd0001f6, 1377 0xbd0001f6,
1383/* 0x0643: i2c_drive_scl_lo */ 1378/* 0x0637: i2c_drive_scl_lo */
1384 0x4000f804, 1379 0x4000f804,
1385 0x01f607e4, 1380 0x01f607e4,
1386 0xf804bd00, 1381 0xf804bd00,
1387/* 0x064d: i2c_drive_sda */ 1382/* 0x0641: i2c_drive_sda */
1388 0x0036b000, 1383 0x0036b000,
1389 0x400d0bf4, 1384 0x400d0bf4,
1390 0x02f607e0, 1385 0x02f607e0,
1391 0xf804bd00, 1386 0xf804bd00,
1392/* 0x065d: i2c_drive_sda_lo */ 1387/* 0x0651: i2c_drive_sda_lo */
1393 0x07e44000, 1388 0x07e44000,
1394 0xbd0002f6, 1389 0xbd0002f6,
1395/* 0x0667: i2c_sense_scl */ 1390/* 0x065b: i2c_sense_scl */
1396 0xf400f804, 1391 0xf400f804,
1397 0xc4430132, 1392 0xc4430132,
1398 0x0033cf07, 1393 0x0033cf07,
1399 0xf40431fd, 1394 0xf40431fd,
1400 0x31f4060b, 1395 0x31f4060b,
1401/* 0x0679: i2c_sense_scl_done */ 1396/* 0x066d: i2c_sense_scl_done */
1402/* 0x067b: i2c_sense_sda */ 1397/* 0x066f: i2c_sense_sda */
1403 0xf400f801, 1398 0xf400f801,
1404 0xc4430132, 1399 0xc4430132,
1405 0x0033cf07, 1400 0x0033cf07,
1406 0xf40432fd, 1401 0xf40432fd,
1407 0x31f4060b, 1402 0x31f4060b,
1408/* 0x068d: i2c_sense_sda_done */ 1403/* 0x0681: i2c_sense_sda_done */
1409/* 0x068f: i2c_raise_scl */ 1404/* 0x0683: i2c_raise_scl */
1410 0xf900f801, 1405 0xf900f801,
1411 0x08984440, 1406 0x08984440,
1412 0x337e0103, 1407 0x277e0103,
1413/* 0x069a: i2c_raise_scl_wait */ 1408/* 0x068e: i2c_raise_scl_wait */
1414 0xe84e0006, 1409 0xe84e0006,
1415 0x005d7e03, 1410 0x005d7e03,
1416 0x06677e00, 1411 0x065b7e00,
1417 0x0901f400, 1412 0x0901f400,
1418 0xf40142b6, 1413 0xf40142b6,
1419/* 0x06ae: i2c_raise_scl_done */ 1414/* 0x06a2: i2c_raise_scl_done */
1420 0x40fcef1b, 1415 0x40fcef1b,
1421/* 0x06b2: i2c_start */ 1416/* 0x06a6: i2c_start */
1422 0x677e00f8, 1417 0x5b7e00f8,
1423 0x11f40006, 1418 0x11f40006,
1424 0x067b7e0d, 1419 0x066f7e0d,
1425 0x0611f400, 1420 0x0611f400,
1426/* 0x06c3: i2c_start_rep */ 1421/* 0x06b7: i2c_start_rep */
1427 0x032e0ef4, 1422 0x032e0ef4,
1428 0x06337e00, 1423 0x06277e00,
1429 0x7e010300, 1424 0x7e010300,
1430 0xbb00064d, 1425 0xbb000641,
1431 0x65b60076, 1426 0x65b60076,
1432 0x9450f904, 1427 0x9450f904,
1433 0x56bb0465, 1428 0x56bb0465,
1434 0xfd50bd02, 1429 0xfd50bd02,
1435 0x50fc0475, 1430 0x50fc0475,
1436 0x00068f7e, 1431 0x0006837e,
1437 0xf40464b6, 1432 0xf40464b6,
1438/* 0x06ee: i2c_start_send */ 1433/* 0x06e2: i2c_start_send */
1439 0x00031d11, 1434 0x00031d11,
1440 0x00064d7e, 1435 0x0006417e,
1441 0x7e13884e, 1436 0x7e13884e,
1442 0x0300005d, 1437 0x0300005d,
1443 0x06337e00, 1438 0x06277e00,
1444 0x13884e00, 1439 0x13884e00,
1445 0x00005d7e, 1440 0x00005d7e,
1446/* 0x0708: i2c_start_out */ 1441/* 0x06fc: i2c_start_out */
1447/* 0x070a: i2c_stop */ 1442/* 0x06fe: i2c_stop */
1448 0x000300f8, 1443 0x000300f8,
1449 0x0006337e, 1444 0x0006277e,
1450 0x4d7e0003, 1445 0x417e0003,
1451 0xe84e0006, 1446 0xe84e0006,
1452 0x005d7e03, 1447 0x005d7e03,
1453 0x7e010300, 1448 0x7e010300,
1454 0x4e000633, 1449 0x4e000627,
1455 0x5d7e1388, 1450 0x5d7e1388,
1456 0x01030000, 1451 0x01030000,
1457 0x00064d7e, 1452 0x0006417e,
1458 0x7e13884e, 1453 0x7e13884e,
1459 0xf800005d, 1454 0xf800005d,
1460/* 0x0739: i2c_bitw */ 1455/* 0x072d: i2c_bitw */
1461 0x064d7e00, 1456 0x06417e00,
1462 0x03e84e00, 1457 0x03e84e00,
1463 0x00005d7e, 1458 0x00005d7e,
1464 0xb60076bb, 1459 0xb60076bb,
@@ -1466,18 +1461,18 @@ uint32_t gk208_pmu_code[] = {
1466 0xbb046594, 1461 0xbb046594,
1467 0x50bd0256, 1462 0x50bd0256,
1468 0xfc0475fd, 1463 0xfc0475fd,
1469 0x068f7e50, 1464 0x06837e50,
1470 0x0464b600, 1465 0x0464b600,
1471 0x4e1711f4, 1466 0x4e1711f4,
1472 0x5d7e1388, 1467 0x5d7e1388,
1473 0x00030000, 1468 0x00030000,
1474 0x0006337e, 1469 0x0006277e,
1475 0x7e13884e, 1470 0x7e13884e,
1476/* 0x0777: i2c_bitw_out */ 1471/* 0x076b: i2c_bitw_out */
1477 0xf800005d, 1472 0xf800005d,
1478/* 0x0779: i2c_bitr */ 1473/* 0x076d: i2c_bitr */
1479 0x7e010300, 1474 0x7e010300,
1480 0x4e00064d, 1475 0x4e000641,
1481 0x5d7e03e8, 1476 0x5d7e03e8,
1482 0x76bb0000, 1477 0x76bb0000,
1483 0x0465b600, 1478 0x0465b600,
@@ -1485,25 +1480,25 @@ uint32_t gk208_pmu_code[] = {
1485 0x0256bb04, 1480 0x0256bb04,
1486 0x75fd50bd, 1481 0x75fd50bd,
1487 0x7e50fc04, 1482 0x7e50fc04,
1488 0xb600068f, 1483 0xb6000683,
1489 0x11f40464, 1484 0x11f40464,
1490 0x067b7e1a, 1485 0x066f7e1a,
1491 0x7e000300, 1486 0x7e000300,
1492 0x4e000633, 1487 0x4e000627,
1493 0x5d7e1388, 1488 0x5d7e1388,
1494 0x3cf00000, 1489 0x3cf00000,
1495 0x0131f401, 1490 0x0131f401,
1496/* 0x07bc: i2c_bitr_done */ 1491/* 0x07b0: i2c_bitr_done */
1497/* 0x07be: i2c_get_byte */ 1492/* 0x07b2: i2c_get_byte */
1498 0x000500f8, 1493 0x000500f8,
1499/* 0x07c2: i2c_get_byte_next */ 1494/* 0x07b6: i2c_get_byte_next */
1500 0x54b60804, 1495 0x54b60804,
1501 0x0076bb01, 1496 0x0076bb01,
1502 0xf90465b6, 1497 0xf90465b6,
1503 0x04659450, 1498 0x04659450,
1504 0xbd0256bb, 1499 0xbd0256bb,
1505 0x0475fd50, 1500 0x0475fd50,
1506 0x797e50fc, 1501 0x6d7e50fc,
1507 0x64b60007, 1502 0x64b60007,
1508 0x2a11f404, 1503 0x2a11f404,
1509 0xb60553fd, 1504 0xb60553fd,
@@ -1514,11 +1509,11 @@ uint32_t gk208_pmu_code[] = {
1514 0x56bb0465, 1509 0x56bb0465,
1515 0xfd50bd02, 1510 0xfd50bd02,
1516 0x50fc0475, 1511 0x50fc0475,
1517 0x0007397e, 1512 0x00072d7e,
1518/* 0x080b: i2c_get_byte_done */ 1513/* 0x07ff: i2c_get_byte_done */
1519 0xf80464b6, 1514 0xf80464b6,
1520/* 0x080d: i2c_put_byte */ 1515/* 0x0801: i2c_put_byte */
1521/* 0x080f: i2c_put_byte_next */ 1516/* 0x0803: i2c_put_byte_next */
1522 0xb6080400, 1517 0xb6080400,
1523 0x54ff0142, 1518 0x54ff0142,
1524 0x0076bb38, 1519 0x0076bb38,
@@ -1526,7 +1521,7 @@ uint32_t gk208_pmu_code[] = {
1526 0x04659450, 1521 0x04659450,
1527 0xbd0256bb, 1522 0xbd0256bb,
1528 0x0475fd50, 1523 0x0475fd50,
1529 0x397e50fc, 1524 0x2d7e50fc,
1530 0x64b60007, 1525 0x64b60007,
1531 0x3411f404, 1526 0x3411f404,
1532 0xf40046b0, 1527 0xf40046b0,
@@ -1536,20 +1531,20 @@ uint32_t gk208_pmu_code[] = {
1536 0x0256bb04, 1531 0x0256bb04,
1537 0x75fd50bd, 1532 0x75fd50bd,
1538 0x7e50fc04, 1533 0x7e50fc04,
1539 0xb6000779, 1534 0xb600076d,
1540 0x11f40464, 1535 0x11f40464,
1541 0x0076bb0f, 1536 0x0076bb0f,
1542 0xf40136b0, 1537 0xf40136b0,
1543 0x32f4061b, 1538 0x32f4061b,
1544/* 0x0865: i2c_put_byte_done */ 1539/* 0x0859: i2c_put_byte_done */
1545/* 0x0867: i2c_addr */ 1540/* 0x085b: i2c_addr */
1546 0xbb00f801, 1541 0xbb00f801,
1547 0x65b60076, 1542 0x65b60076,
1548 0x9450f904, 1543 0x9450f904,
1549 0x56bb0465, 1544 0x56bb0465,
1550 0xfd50bd02, 1545 0xfd50bd02,
1551 0x50fc0475, 1546 0x50fc0475,
1552 0x0006b27e, 1547 0x0006a67e,
1553 0xf40464b6, 1548 0xf40464b6,
1554 0xc3e72911, 1549 0xc3e72911,
1555 0x34b6012e, 1550 0x34b6012e,
@@ -1559,25 +1554,25 @@ uint32_t gk208_pmu_code[] = {
1559 0xbb046594, 1554 0xbb046594,
1560 0x50bd0256, 1555 0x50bd0256,
1561 0xfc0475fd, 1556 0xfc0475fd,
1562 0x080d7e50, 1557 0x08017e50,
1563 0x0464b600, 1558 0x0464b600,
1564/* 0x08ac: i2c_addr_done */ 1559/* 0x08a0: i2c_addr_done */
1565/* 0x08ae: i2c_acquire_addr */ 1560/* 0x08a2: i2c_acquire_addr */
1566 0xcec700f8, 1561 0xcec700f8,
1567 0x05e4b6f8, 1562 0x05e4b6f8,
1568 0xd014e0b7, 1563 0xd014e0b7,
1569/* 0x08ba: i2c_acquire */ 1564/* 0x08ae: i2c_acquire */
1570 0xae7e00f8, 1565 0xa27e00f8,
1571 0x047e0008, 1566 0x047e0008,
1572 0xd9f00000, 1567 0xd9f00000,
1573 0x002e7e03, 1568 0x002e7e03,
1574/* 0x08cb: i2c_release */ 1569/* 0x08bf: i2c_release */
1575 0x7e00f800, 1570 0x7e00f800,
1576 0x7e0008ae, 1571 0x7e0008a2,
1577 0xf0000004, 1572 0xf0000004,
1578 0x2e7e03da, 1573 0x2e7e03da,
1579 0x00f80000, 1574 0x00f80000,
1580/* 0x08dc: i2c_recv */ 1575/* 0x08d0: i2c_recv */
1581 0xc70132f4, 1576 0xc70132f4,
1582 0x14b6f8c1, 1577 0x14b6f8c1,
1583 0x2816b002, 1578 0x2816b002,
@@ -1596,7 +1591,7 @@ uint32_t gk208_pmu_code[] = {
1596 0xbb046594, 1591 0xbb046594,
1597 0x50bd0256, 1592 0x50bd0256,
1598 0xfc0475fd, 1593 0xfc0475fd,
1599 0x08ba7e50, 1594 0x08ae7e50,
1600 0x0464b600, 1595 0x0464b600,
1601 0xd6b0d0fc, 1596 0xd6b0d0fc,
1602 0xb01bf500, 1597 0xb01bf500,
@@ -1606,7 +1601,7 @@ uint32_t gk208_pmu_code[] = {
1606 0x56bb0465, 1601 0x56bb0465,
1607 0xfd50bd02, 1602 0xfd50bd02,
1608 0x50fc0475, 1603 0x50fc0475,
1609 0x0008677e, 1604 0x00085b7e,
1610 0xf50464b6, 1605 0xf50464b6,
1611 0xc700cc11, 1606 0xc700cc11,
1612 0x76bbe0c5, 1607 0x76bbe0c5,
@@ -1615,7 +1610,7 @@ uint32_t gk208_pmu_code[] = {
1615 0x0256bb04, 1610 0x0256bb04,
1616 0x75fd50bd, 1611 0x75fd50bd,
1617 0x7e50fc04, 1612 0x7e50fc04,
1618 0xb600080d, 1613 0xb6000801,
1619 0x11f50464, 1614 0x11f50464,
1620 0x010500a9, 1615 0x010500a9,
1621 0xb60076bb, 1616 0xb60076bb,
@@ -1623,7 +1618,7 @@ uint32_t gk208_pmu_code[] = {
1623 0xbb046594, 1618 0xbb046594,
1624 0x50bd0256, 1619 0x50bd0256,
1625 0xfc0475fd, 1620 0xfc0475fd,
1626 0x08677e50, 1621 0x085b7e50,
1627 0x0464b600, 1622 0x0464b600,
1628 0x008711f5, 1623 0x008711f5,
1629 0xb60076bb, 1624 0xb60076bb,
@@ -1631,7 +1626,7 @@ uint32_t gk208_pmu_code[] = {
1631 0xbb046594, 1626 0xbb046594,
1632 0x50bd0256, 1627 0x50bd0256,
1633 0xfc0475fd, 1628 0xfc0475fd,
1634 0x07be7e50, 1629 0x07b27e50,
1635 0x0464b600, 1630 0x0464b600,
1636 0xcb6711f4, 1631 0xcb6711f4,
1637 0x76bbe05b, 1632 0x76bbe05b,
@@ -1640,36 +1635,36 @@ uint32_t gk208_pmu_code[] = {
1640 0x0256bb04, 1635 0x0256bb04,
1641 0x75fd50bd, 1636 0x75fd50bd,
1642 0x7e50fc04, 1637 0x7e50fc04,
1643 0xb600070a, 1638 0xb60006fe,
1644 0x5bb20464, 1639 0x5bb20464,
1645 0x0ef474bd, 1640 0x0ef474bd,
1646/* 0x09e1: i2c_recv_not_rd08 */ 1641/* 0x09d5: i2c_recv_not_rd08 */
1647 0x01d6b041, 1642 0x01d6b041,
1648 0x053b1bf4, 1643 0x053b1bf4,
1649 0x08677e00, 1644 0x085b7e00,
1650 0x3211f400, 1645 0x3211f400,
1651 0x7ee0c5c7, 1646 0x7ee0c5c7,
1652 0xf400080d, 1647 0xf4000801,
1653 0x00052811, 1648 0x00052811,
1654 0x0008677e, 1649 0x00085b7e,
1655 0xc71f11f4, 1650 0xc71f11f4,
1656 0x0d7ee0b5, 1651 0x017ee0b5,
1657 0x11f40008, 1652 0x11f40008,
1658 0x070a7e15, 1653 0x06fe7e15,
1659 0xc774bd00, 1654 0xc774bd00,
1660 0x1bf408c5, 1655 0x1bf408c5,
1661 0x0232f409, 1656 0x0232f409,
1662/* 0x0a1f: i2c_recv_not_wr08 */ 1657/* 0x0a13: i2c_recv_not_wr08 */
1663/* 0x0a1f: i2c_recv_done */ 1658/* 0x0a13: i2c_recv_done */
1664 0xc7030ef4, 1659 0xc7030ef4,
1665 0xcb7ef8ce, 1660 0xbf7ef8ce,
1666 0xe0fc0008, 1661 0xe0fc0008,
1667 0x12f4d0fc, 1662 0x12f4d0fc,
1668 0x7e7cb209, 1663 0x7e7cb209,
1669/* 0x0a33: i2c_recv_exit */ 1664/* 0x0a27: i2c_recv_exit */
1670 0xf80002c2, 1665 0xf80002b6,
1671/* 0x0a35: i2c_init */ 1666/* 0x0a29: i2c_init */
1672/* 0x0a37: test_recv */ 1667/* 0x0a2b: test_recv */
1673 0x4100f800, 1668 0x4100f800,
1674 0x11cf0458, 1669 0x11cf0458,
1675 0x0110b600, 1670 0x0110b600,
@@ -1677,28 +1672,28 @@ uint32_t gk208_pmu_code[] = {
1677 0x04bd0001, 1672 0x04bd0001,
1678 0xd900e7f1, 1673 0xd900e7f1,
1679 0x134fe3f1, 1674 0x134fe3f1,
1680 0x0002017e, 1675 0x0001f57e,
1681/* 0x0a56: test_init */ 1676/* 0x0a4a: test_init */
1682 0x004e00f8, 1677 0x004e00f8,
1683 0x02017e08, 1678 0x01f57e08,
1684/* 0x0a5f: idle_recv */ 1679/* 0x0a53: idle_recv */
1685 0xf800f800, 1680 0xf800f800,
1686/* 0x0a61: idle */ 1681/* 0x0a55: idle */
1687 0x0031f400, 1682 0x0031f400,
1688 0xcf045441, 1683 0xcf045441,
1689 0x10b60011, 1684 0x10b60011,
1690 0x04544001, 1685 0x04544001,
1691 0xbd0001f6, 1686 0xbd0001f6,
1692/* 0x0a75: idle_loop */ 1687/* 0x0a69: idle_loop */
1693 0xf4580104, 1688 0xf4580104,
1694/* 0x0a7a: idle_proc */ 1689/* 0x0a6e: idle_proc */
1695/* 0x0a7a: idle_proc_exec */ 1690/* 0x0a6e: idle_proc_exec */
1696 0x10f90232, 1691 0x10f90232,
1697 0xcb7e1eb2, 1692 0xbf7e1eb2,
1698 0x10fc0002, 1693 0x10fc0002,
1699 0xf40911f4, 1694 0xf40911f4,
1700 0x0ef40231, 1695 0x0ef40231,
1701/* 0x0a8d: idle_proc_next */ 1696/* 0x0a81: idle_proc_next */
1702 0x5810b6f0, 1697 0x5810b6f0,
1703 0x1bf41fa6, 1698 0x1bf41fa6,
1704 0xe002f4e8, 1699 0xe002f4e8,
@@ -1728,4 +1723,7 @@ uint32_t gk208_pmu_code[] = {
1728 0x00000000, 1723 0x00000000,
1729 0x00000000, 1724 0x00000000,
1730 0x00000000, 1725 0x00000000,
1726 0x00000000,
1727 0x00000000,
1728 0x00000000,
1731}; 1729};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 2686f8fad0f5..516569270bac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -24,8 +24,8 @@ uint32_t gt215_pmu_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x00000512, 27 0x00000507,
28 0x000004af, 28 0x000004a4,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t gt215_pmu_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x00000842, 49 0x00000837,
50 0x00000834, 50 0x00000829,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t gt215_pmu_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000846, 71 0x0000083b,
72 0x00000844, 72 0x00000839,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t gt215_pmu_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000c76, 93 0x00000c6b,
94 0x00000b19, 94 0x00000b0e,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t gt215_pmu_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000c9f, 115 0x00000c94,
116 0x00000c78, 116 0x00000c6d,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t gt215_pmu_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000cab, 137 0x00000ca0,
138 0x00000ca9, 138 0x00000c9e,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -229,26 +229,26 @@ uint32_t gt215_pmu_data[] = {
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00000001, 230 0x00000001,
231 0x00000000, 231 0x00000000,
232 0x00000551, 232 0x00000546,
233/* 0x037c: memx_func_next */ 233/* 0x037c: memx_func_next */
234 0x00000002, 234 0x00000002,
235 0x00000000, 235 0x00000000,
236 0x000005a8, 236 0x0000059d,
237 0x00000003, 237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x0000063a, 239 0x0000062f,
240 0x00040004, 240 0x00040004,
241 0x00000000, 241 0x00000000,
242 0x00000656, 242 0x0000064b,
243 0x00010005, 243 0x00010005,
244 0x00000000, 244 0x00000000,
245 0x00000673, 245 0x00000668,
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x000005f8, 248 0x000005ed,
249 0x00000007, 249 0x00000007,
250 0x00000000, 250 0x00000000,
251 0x0000067e, 251 0x00000673,
252/* 0x03c4: memx_func_tail */ 252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */ 253/* 0x03c4: memx_ts_start */
254 0x00000000, 254 0x00000000,
@@ -917,7 +917,7 @@ uint32_t gt215_pmu_data[] = {
917}; 917};
918 918
919uint32_t gt215_pmu_code[] = { 919uint32_t gt215_pmu_code[] = {
920 0x039e0ef5, 920 0x03930ef5,
921/* 0x0004: rd32 */ 921/* 0x0004: rd32 */
922 0x07a007f1, 922 0x07a007f1,
923 0xd00604b6, 923 0xd00604b6,
@@ -987,7 +987,7 @@ uint32_t gt215_pmu_code[] = {
987 0xbb9a0a98, 987 0xbb9a0a98,
988 0x1cf4029a, 988 0x1cf4029a,
989 0x01d7f00f, 989 0x01d7f00f,
990 0x02dd21f5, 990 0x02d221f5,
991 0x0ef494bd, 991 0x0ef494bd,
992/* 0x00f9: intr_watchdog_next_time */ 992/* 0x00f9: intr_watchdog_next_time */
993 0x9b0a9815, 993 0x9b0a9815,
@@ -1039,7 +1039,7 @@ uint32_t gt215_pmu_code[] = {
1039 0x48e7f1c0, 1039 0x48e7f1c0,
1040 0x53e3f14f, 1040 0x53e3f14f,
1041 0x00d7f054, 1041 0x00d7f054,
1042 0x034221f5, 1042 0x033721f5,
1043 0x07f1c0fc, 1043 0x07f1c0fc,
1044 0x04b604c0, 1044 0x04b604c0,
1045 0x000cd006, 1045 0x000cd006,
@@ -1048,820 +1048,818 @@ uint32_t gt215_pmu_code[] = {
1048 0x04b60688, 1048 0x04b60688,
1049 0x0009d006, 1049 0x0009d006,
1050/* 0x01ca: intr_skip_subintr */ 1050/* 0x01ca: intr_skip_subintr */
1051 0x89c404bd, 1051 0x97f104bd,
1052 0x070bf420, 1052 0x90bd00e0,
1053 0xffbfa4f1, 1053 0xf00489fd,
1054/* 0x01d4: intr_skip_pause */ 1054 0x04b60407,
1055 0xf44089c4, 1055 0x0008d006,
1056 0xa4f1070b, 1056 0x80fc04bd,
1057/* 0x01de: intr_skip_user0 */ 1057 0xfc0088fe,
1058 0x07f0ffbf, 1058 0xfce0fcf0,
1059 0x0604b604, 1059 0xfcc0fcd0,
1060 0xbd0008d0, 1060 0xfca0fcb0,
1061 0xfe80fc04, 1061 0xfc80fc90,
1062 0xf0fc0088, 1062 0x0032f400,
1063 0xd0fce0fc, 1063/* 0x01fa: ticks_from_ns */
1064 0xb0fcc0fc, 1064 0xc0f901f8,
1065 0x90fca0fc,
1066 0x00fc80fc,
1067 0xf80032f4,
1068/* 0x0205: ticks_from_ns */
1069 0xf9c0f901,
1070 0xcbd7f1b0,
1071 0x00d3f000,
1072 0x041321f5,
1073 0x03e8ccec,
1074 0xf400b4b0,
1075 0xeeec120b,
1076 0xd7f103e8,
1077 0xd3f000cb,
1078 0x1321f500,
1079/* 0x022d: ticks_from_ns_quit */
1080 0x02ceb904,
1081 0xc0fcb0fc,
1082/* 0x0236: ticks_from_us */
1083 0xc0f900f8,
1084 0xd7f1b0f9, 1065 0xd7f1b0f9,
1085 0xd3f000cb, 1066 0xd3f000cb,
1086 0x1321f500, 1067 0x0821f500,
1087 0x02ceb904, 1068 0xe8ccec04,
1088 0xf400b4b0, 1069 0x00b4b003,
1089 0xe4bd050b, 1070 0xec120bf4,
1090/* 0x0250: ticks_from_us_quit */ 1071 0xf103e8ee,
1091 0xc0fcb0fc, 1072 0xf000cbd7,
1092/* 0x0256: ticks_to_us */ 1073 0x21f500d3,
1093 0xd7f100f8, 1074/* 0x0222: ticks_from_ns_quit */
1094 0xd3f000cb, 1075 0xceb90408,
1095 0xecedff00, 1076 0xfcb0fc02,
1096/* 0x0262: timer */ 1077/* 0x022b: ticks_from_us */
1097 0x90f900f8, 1078 0xf900f8c0,
1098 0x32f480f9, 1079 0xf1b0f9c0,
1099 0x03f89810, 1080 0xf000cbd7,
1100 0xf40086b0, 1081 0x21f500d3,
1101 0x84bd651c, 1082 0xceb90408,
1102 0xb63807f0, 1083 0x00b4b002,
1103 0x08d00604, 1084 0xbd050bf4,
1104 0xf004bd00, 1085/* 0x0245: ticks_from_us_quit */
1105 0x84b63487, 1086 0xfcb0fce4,
1106 0x0088cf06, 1087/* 0x024b: ticks_to_us */
1107 0xbb9a0998, 1088 0xf100f8c0,
1108 0xe9bb0298, 1089 0xf000cbd7,
1109 0x03fe8000, 1090 0xedff00d3,
1110 0xb60887f0, 1091/* 0x0257: timer */
1111 0x88cf0684, 1092 0xf900f8ec,
1112 0x0284f000, 1093 0xf480f990,
1113 0xf0261bf4, 1094 0xf8981032,
1114 0x84b63487, 1095 0x0086b003,
1115 0x0088cf06, 1096 0xbd651cf4,
1116 0xf406e0b8, 1097 0x3807f084,
1117 0xe8b8090b,
1118 0x111cf406,
1119/* 0x02b8: timer_reset */
1120 0xb63407f0,
1121 0x0ed00604,
1122 0x8004bd00,
1123/* 0x02c6: timer_enable */
1124 0x87f09a0e,
1125 0x3807f001,
1126 0xd00604b6, 1098 0xd00604b6,
1127 0x04bd0008, 1099 0x04bd0008,
1128/* 0x02d4: timer_done */ 1100 0xb63487f0,
1129 0xfc1031f4, 1101 0x88cf0684,
1130 0xf890fc80, 1102 0x9a099800,
1131/* 0x02dd: send_proc */ 1103 0xbb0298bb,
1132 0xf980f900, 1104 0xfe8000e9,
1133 0x05e89890, 1105 0x0887f003,
1134 0xf004e998, 1106 0xcf0684b6,
1135 0x89b80486, 1107 0x84f00088,
1136 0x2a0bf406, 1108 0x261bf402,
1137 0x940398c4, 1109 0xb63487f0,
1138 0x80b60488, 1110 0x88cf0684,
1139 0x008ebb18, 1111 0x06e0b800,
1140 0x8000fa98, 1112 0xb8090bf4,
1141 0x8d80008a, 1113 0x1cf406e8,
1142 0x028c8001, 1114/* 0x02ad: timer_reset */
1143 0xb6038b80, 1115 0x3407f011,
1144 0x94f00190,
1145 0x04e98007,
1146/* 0x0317: send_done */
1147 0xfc0231f4,
1148 0xf880fc90,
1149/* 0x031d: find */
1150 0xf080f900,
1151 0x31f45887,
1152/* 0x0325: find_loop */
1153 0x008a9801,
1154 0xf406aeb8,
1155 0x80b6100b,
1156 0x6886b158,
1157 0xf01bf402,
1158/* 0x033b: find_done */
1159 0xb90132f4,
1160 0x80fc028e,
1161/* 0x0342: send */
1162 0x21f500f8,
1163 0x01f4031d,
1164/* 0x034b: recv */
1165 0xf900f897,
1166 0x9880f990,
1167 0xe99805e8,
1168 0x0132f404,
1169 0xf40689b8,
1170 0x89c43d0b,
1171 0x0180b603,
1172 0x800784f0,
1173 0xea9805e8,
1174 0xfef0f902,
1175 0xf0f9018f,
1176 0x9402efb9,
1177 0xe9bb0499,
1178 0x18e0b600,
1179 0x9803eb98,
1180 0xed9802ec,
1181 0x00ee9801,
1182 0xf0fca5f9,
1183 0xf400f8fe,
1184 0xf0fc0131,
1185/* 0x0398: recv_done */
1186 0x90fc80fc,
1187/* 0x039e: init */
1188 0x17f100f8,
1189 0x14b60108,
1190 0x0011cf06,
1191 0x010911e7,
1192 0xfe0814b6,
1193 0x17f10014,
1194 0x13f000e0,
1195 0x1c07f000,
1196 0xd00604b6,
1197 0x04bd0001,
1198 0xf0ff17f0,
1199 0x04b61407,
1200 0x0001d006,
1201 0x17f004bd,
1202 0x0015f102,
1203 0x1007f008,
1204 0xd00604b6, 1116 0xd00604b6,
1205 0x04bd0001, 1117 0x04bd000e,
1206 0x011a17f1, 1118/* 0x02bb: timer_enable */
1207 0xfe0013f0, 1119 0xf09a0e80,
1208 0x31f40010, 1120 0x07f00187,
1209 0x0117f010, 1121 0x0604b638,
1210 0xb63807f0, 1122 0xbd0008d0,
1123/* 0x02c9: timer_done */
1124 0x1031f404,
1125 0x90fc80fc,
1126/* 0x02d2: send_proc */
1127 0x80f900f8,
1128 0xe89890f9,
1129 0x04e99805,
1130 0xb80486f0,
1131 0x0bf40689,
1132 0x0398c42a,
1133 0xb6048894,
1134 0x8ebb1880,
1135 0x00fa9800,
1136 0x80008a80,
1137 0x8c80018d,
1138 0x038b8002,
1139 0xf00190b6,
1140 0xe9800794,
1141 0x0231f404,
1142/* 0x030c: send_done */
1143 0x80fc90fc,
1144/* 0x0312: find */
1145 0x80f900f8,
1146 0xf45887f0,
1147/* 0x031a: find_loop */
1148 0x8a980131,
1149 0x06aeb800,
1150 0xb6100bf4,
1151 0x86b15880,
1152 0x1bf40268,
1153 0x0132f4f0,
1154/* 0x0330: find_done */
1155 0xfc028eb9,
1156/* 0x0337: send */
1157 0xf500f880,
1158 0xf4031221,
1159 0x00f89701,
1160/* 0x0340: recv */
1161 0x80f990f9,
1162 0x9805e898,
1163 0x32f404e9,
1164 0x0689b801,
1165 0xc43d0bf4,
1166 0x80b60389,
1167 0x0784f001,
1168 0x9805e880,
1169 0xf0f902ea,
1170 0xf9018ffe,
1171 0x02efb9f0,
1172 0xbb049994,
1173 0xe0b600e9,
1174 0x03eb9818,
1175 0x9802ec98,
1176 0xee9801ed,
1177 0xfca5f900,
1178 0x00f8fef0,
1179 0xfc0131f4,
1180/* 0x038d: recv_done */
1181 0xfc80fcf0,
1182/* 0x0393: init */
1183 0xf100f890,
1184 0xb6010817,
1185 0x11cf0614,
1186 0x0911e700,
1187 0x0814b601,
1188 0xf10014fe,
1189 0xf000e017,
1190 0x07f00013,
1191 0x0604b61c,
1192 0xbd0001d0,
1193 0xff17f004,
1194 0xb61407f0,
1211 0x01d00604, 1195 0x01d00604,
1212 0xf004bd00, 1196 0xf004bd00,
1213/* 0x0402: init_proc */ 1197 0x15f10217,
1214 0xf19858f7, 1198 0x07f00800,
1215 0x0016b001, 1199 0x0604b610,
1216 0xf9fa0bf4, 1200 0xbd0001d0,
1217 0x58f0b615, 1201 0x1a17f104,
1218/* 0x0413: mulu32_32_64 */ 1202 0x0013f001,
1219 0xf9f20ef4, 1203 0xf40010fe,
1220 0xf920f910, 1204 0x17f01031,
1221 0x9540f930, 1205 0x3807f001,
1222 0xd29510e1, 1206 0xd00604b6,
1223 0xbdc4bd10, 1207 0x04bd0001,
1224 0xc0edffb4, 1208/* 0x03f7: init_proc */
1225 0xb9301dff, 1209 0x9858f7f0,
1226 0x34f10234, 1210 0x16b001f1,
1227 0x34b6ffff, 1211 0xfa0bf400,
1228 0x1045b610, 1212 0xf0b615f9,
1229 0xbb00c3bb, 1213 0xf20ef458,
1230 0xe2ff01b4, 1214/* 0x0408: mulu32_32_64 */
1231 0x0234b930, 1215 0x20f910f9,
1232 0xffff34f1, 1216 0x40f930f9,
1233 0xb61034b6, 1217 0x9510e195,
1234 0xc3bb1045, 1218 0xc4bd10d2,
1235 0x01b4bb00, 1219 0xedffb4bd,
1236 0xbb3012ff, 1220 0x301dffc0,
1237 0x40fc00b3, 1221 0xf10234b9,
1238 0x20fc30fc, 1222 0xb6ffff34,
1239 0x00f810fc, 1223 0x45b61034,
1240/* 0x0464: host_send */ 1224 0x00c3bb10,
1241 0x04b017f1, 1225 0xff01b4bb,
1242 0xcf0614b6, 1226 0x34b930e2,
1243 0x27f10011, 1227 0xff34f102,
1244 0x24b604a0, 1228 0x1034b6ff,
1245 0x0022cf06, 1229 0xbb1045b6,
1246 0xf40612b8, 1230 0xb4bb00c3,
1247 0x1ec4320b, 1231 0x3012ff01,
1248 0x04ee9407, 1232 0xfc00b3bb,
1249 0x0270e0b7, 1233 0xfc30fc40,
1250 0x9803eb98, 1234 0xf810fc20,
1251 0xed9802ec, 1235/* 0x0459: host_send */
1252 0x00ee9801, 1236 0xb017f100,
1253 0x034221f5,
1254 0xc40110b6,
1255 0x07f10f1e,
1256 0x04b604b0,
1257 0x000ed006,
1258 0x0ef404bd,
1259/* 0x04ad: host_send_done */
1260/* 0x04af: host_recv */
1261 0xf100f8ba,
1262 0xf14e4917,
1263 0xb8525413,
1264 0x0bf406e1,
1265/* 0x04bd: host_recv_wait */
1266 0xcc17f1aa,
1267 0x0614b604, 1237 0x0614b604,
1268 0xf10011cf, 1238 0xf10011cf,
1269 0xb604c827, 1239 0xb604a027,
1270 0x22cf0624, 1240 0x22cf0624,
1271 0x0816f000, 1241 0x0612b800,
1272 0xf40612b8, 1242 0xc4320bf4,
1273 0x23c4e60b, 1243 0xee94071e,
1274 0x0434b607, 1244 0x70e0b704,
1275 0x02f030b7, 1245 0x03eb9802,
1276 0x80033b80, 1246 0x9802ec98,
1277 0x3d80023c, 1247 0xee9801ed,
1278 0x003e8001, 1248 0x3721f500,
1279 0xf00120b6, 1249 0x0110b603,
1280 0x07f10f24, 1250 0xf10f1ec4,
1281 0x04b604c8, 1251 0xb604b007,
1282 0x0002d006, 1252 0x0ed00604,
1283 0x27f004bd, 1253 0xf404bd00,
1284 0x0007f040, 1254/* 0x04a2: host_send_done */
1285 0xd00604b6, 1255 0x00f8ba0e,
1286 0x04bd0002, 1256/* 0x04a4: host_recv */
1287/* 0x0512: host_init */ 1257 0x4e4917f1,
1288 0x17f100f8, 1258 0x525413f1,
1259 0xf406e1b8,
1260/* 0x04b2: host_recv_wait */
1261 0x17f1aa0b,
1262 0x14b604cc,
1263 0x0011cf06,
1264 0x04c827f1,
1265 0xcf0624b6,
1266 0x16f00022,
1267 0x0612b808,
1268 0xc4e60bf4,
1269 0x34b60723,
1270 0xf030b704,
1271 0x033b8002,
1272 0x80023c80,
1273 0x3e80013d,
1274 0x0120b600,
1275 0xf10f24f0,
1276 0xb604c807,
1277 0x02d00604,
1278 0xf004bd00,
1279 0x07f04027,
1280 0x0604b600,
1281 0xbd0002d0,
1282/* 0x0507: host_init */
1283 0xf100f804,
1284 0xb6008017,
1285 0x15f11014,
1286 0x07f10270,
1287 0x04b604d0,
1288 0x0001d006,
1289 0x17f104bd,
1289 0x14b60080, 1290 0x14b60080,
1290 0x7015f110, 1291 0xf015f110,
1291 0xd007f102, 1292 0xdc07f102,
1292 0x0604b604, 1293 0x0604b604,
1293 0xbd0001d0, 1294 0xbd0001d0,
1294 0x8017f104, 1295 0x0117f004,
1295 0x1014b600, 1296 0x04c407f1,
1296 0x02f015f1,
1297 0x04dc07f1,
1298 0xd00604b6, 1297 0xd00604b6,
1299 0x04bd0001, 1298 0x04bd0001,
1300 0xf10117f0, 1299/* 0x0546: memx_func_enter */
1301 0xb604c407, 1300 0x87f100f8,
1302 0x01d00604, 1301 0x8eb91610,
1303 0xf804bd00, 1302 0x0421f402,
1304/* 0x0551: memx_func_enter */ 1303 0xf102d7b9,
1305 0x1087f100, 1304 0xf1fffc67,
1306 0x028eb916, 1305 0xfdffff63,
1307 0xb90421f4, 1306 0x67f10476,
1308 0x67f102d7, 1307 0x76fd0002,
1309 0x63f1fffc, 1308 0xf980f905,
1310 0x76fdffff, 1309 0xfcd0fc70,
1311 0x0267f104, 1310 0x3f21f4e0,
1312 0x0576fd00,
1313 0x70f980f9,
1314 0xe0fcd0fc,
1315 0xf03f21f4,
1316 0x07f10467,
1317 0x04b607e0,
1318 0x0006d006,
1319/* 0x058a: memx_func_enter_wait */
1320 0x67f104bd,
1321 0x64b607c0,
1322 0x0066cf06,
1323 0xf40464f0,
1324 0x67f0f30b,
1325 0x0664b62c,
1326 0x800066cf,
1327 0x00f8f106,
1328/* 0x05a8: memx_func_leave */
1329 0xb62c67f0,
1330 0x66cf0664,
1331 0xf2068000,
1332 0xf10467f0, 1311 0xf10467f0,
1333 0xb607e407, 1312 0xb607e007,
1334 0x06d00604, 1313 0x06d00604,
1335/* 0x05c3: memx_func_leave_wait */ 1314/* 0x057f: memx_func_enter_wait */
1336 0xf104bd00, 1315 0xf104bd00,
1337 0xb607c067, 1316 0xb607c067,
1338 0x66cf0664, 1317 0x66cf0664,
1339 0x0464f000, 1318 0x0464f000,
1340 0xf1f31bf4, 1319 0xf0f30bf4,
1341 0xb9161087, 1320 0x64b62c67,
1342 0x21f4028e, 1321 0x0066cf06,
1343 0x02d7b904, 1322 0xf8f10680,
1344 0xffcc67f1, 1323/* 0x059d: memx_func_leave */
1345 0xffff63f1, 1324 0x2c67f000,
1346 0xf90476fd, 1325 0xcf0664b6,
1347 0xfc70f980, 1326 0x06800066,
1348 0xf4e0fcd0, 1327 0x0467f0f2,
1349 0x00f83f21, 1328 0x07e407f1,
1350/* 0x05f8: memx_func_wait_vblank */ 1329 0xd00604b6,
1351 0xb0001698, 1330 0x04bd0006,
1352 0x0bf40066, 1331/* 0x05b8: memx_func_leave_wait */
1353 0x0166b013, 1332 0x07c067f1,
1354 0xf4060bf4, 1333 0xcf0664b6,
1355/* 0x060a: memx_func_wait_vblank_head1 */ 1334 0x64f00066,
1356 0x77f12e0e, 1335 0xf31bf404,
1357 0x0ef40020, 1336 0x161087f1,
1358/* 0x0611: memx_func_wait_vblank_head0 */ 1337 0xf4028eb9,
1359 0x0877f107, 1338 0xd7b90421,
1360/* 0x0615: memx_func_wait_vblank_0 */ 1339 0xcc67f102,
1361 0xc467f100, 1340 0xff63f1ff,
1362 0x0664b607, 1341 0x0476fdff,
1363 0xfd0066cf, 1342 0x70f980f9,
1364 0x1bf40467,
1365/* 0x0625: memx_func_wait_vblank_1 */
1366 0xc467f1f3,
1367 0x0664b607,
1368 0xfd0066cf,
1369 0x0bf40467,
1370/* 0x0635: memx_func_wait_vblank_fini */
1371 0x0410b6f3,
1372/* 0x063a: memx_func_wr32 */
1373 0x169800f8,
1374 0x01159800,
1375 0xf90810b6,
1376 0xfc50f960,
1377 0xf4e0fcd0,
1378 0x42b63f21,
1379 0xe91bf402,
1380/* 0x0656: memx_func_wait */
1381 0x87f000f8,
1382 0x0684b62c,
1383 0x980088cf,
1384 0x1d98001e,
1385 0x021c9801,
1386 0xb6031b98,
1387 0x21f41010,
1388/* 0x0673: memx_func_delay */
1389 0x9800f8a4,
1390 0x10b6001e,
1391 0x7f21f404,
1392/* 0x067e: memx_func_train */
1393 0x57f100f8,
1394 0x77f10003,
1395 0x97f10000,
1396 0x93f00000,
1397 0x029eb970,
1398 0xb90421f4,
1399 0xe7f102d8,
1400 0x21f42710,
1401/* 0x069d: memx_func_train_loop_outer */
1402 0x0158e07f,
1403 0x0083f101,
1404 0xe097f102,
1405 0x1193f011,
1406 0x80f990f9,
1407 0xe0fcd0fc, 1343 0xe0fcd0fc,
1408 0xf93f21f4, 1344 0xf83f21f4,
1409 0x0067f150, 1345/* 0x05ed: memx_func_wait_vblank */
1410/* 0x06bd: memx_func_train_loop_inner */ 1346 0x00169800,
1411 0x1187f100, 1347 0xf40066b0,
1412 0x9068ff11, 1348 0x66b0130b,
1413 0xfd109894, 1349 0x060bf401,
1414 0x97f10589, 1350/* 0x05ff: memx_func_wait_vblank_head1 */
1415 0x93f00720, 1351 0xf12e0ef4,
1416 0xf990f910, 1352 0xf4002077,
1417 0xfcd0fc80, 1353/* 0x0606: memx_func_wait_vblank_head0 */
1418 0x3f21f4e0, 1354 0x77f1070e,
1419 0x008097f1, 1355/* 0x060a: memx_func_wait_vblank_0 */
1420 0xb91093f0, 1356 0x67f10008,
1421 0x21f4029e, 1357 0x64b607c4,
1422 0x02d8b904, 1358 0x0066cf06,
1423 0xf92088c5, 1359 0xf40467fd,
1424 0xfc80f990, 1360/* 0x061a: memx_func_wait_vblank_1 */
1425 0xf4e0fcd0, 1361 0x67f1f31b,
1426 0x97f13f21, 1362 0x64b607c4,
1427 0x93f0053c, 1363 0x0066cf06,
1428 0x0287f110, 1364 0xf40467fd,
1429 0x0083f130, 1365/* 0x062a: memx_func_wait_vblank_fini */
1430 0xf990f980, 1366 0x10b6f30b,
1431 0xfcd0fc80, 1367/* 0x062f: memx_func_wr32 */
1432 0x3f21f4e0, 1368 0x9800f804,
1433 0x0560e7f1, 1369 0x15980016,
1434 0xf110e3f0, 1370 0x0810b601,
1435 0xf10000d7, 1371 0x50f960f9,
1436 0x908000d3, 1372 0xe0fcd0fc,
1437 0xb7f100dc, 1373 0xb63f21f4,
1438 0xb3f08480, 1374 0x1bf40242,
1439 0xa421f41e, 1375/* 0x064b: memx_func_wait */
1440 0x000057f1, 1376 0xf000f8e9,
1441 0xffff97f1, 1377 0x84b62c87,
1442 0x830093f1, 1378 0x0088cf06,
1443/* 0x073c: memx_func_train_loop_4x */ 1379 0x98001e98,
1444 0x0080a7f1, 1380 0x1c98011d,
1445 0xb910a3f0, 1381 0x031b9802,
1446 0x21f402ae, 1382 0xf41010b6,
1447 0x02d8b904, 1383 0x00f8a421,
1448 0xffdfb7f1, 1384/* 0x0668: memx_func_delay */
1449 0xffffb3f1, 1385 0xb6001e98,
1450 0xf9048bfd, 1386 0x21f40410,
1451 0xfc80f9a0, 1387/* 0x0673: memx_func_train */
1452 0xf4e0fcd0, 1388 0xf100f87f,
1453 0xa7f13f21, 1389 0xf1000357,
1454 0xa3f0053c, 1390 0xf1000077,
1455 0x0287f110, 1391 0xf0000097,
1456 0x0083f130, 1392 0x9eb97093,
1457 0xf9a0f980, 1393 0x0421f402,
1394 0xf102d8b9,
1395 0xf42710e7,
1396/* 0x0692: memx_func_train_loop_outer */
1397 0x58e07f21,
1398 0x83f10101,
1399 0x97f10200,
1400 0x93f011e0,
1401 0xf990f911,
1458 0xfcd0fc80, 1402 0xfcd0fc80,
1459 0x3f21f4e0, 1403 0x3f21f4e0,
1460 0x0560e7f1, 1404 0x67f150f9,
1461 0xf110e3f0, 1405/* 0x06b2: memx_func_train_loop_inner */
1462 0xf10000d7, 1406 0x87f10000,
1463 0xb98000d3, 1407 0x68ff1111,
1464 0xb7f102dc, 1408 0x10989490,
1465 0xb3f02710, 1409 0xf10589fd,
1466 0xa421f400, 1410 0xf0072097,
1467 0xf402eeb9, 1411 0x90f91093,
1468 0xddb90421, 1412 0xd0fc80f9,
1469 0x949dff02, 1413 0x21f4e0fc,
1414 0x8097f13f,
1415 0x1093f000,
1416 0xf4029eb9,
1417 0xd8b90421,
1418 0x2088c502,
1419 0x80f990f9,
1420 0xe0fcd0fc,
1421 0xf13f21f4,
1422 0xf0053c97,
1423 0x87f11093,
1424 0x83f13002,
1425 0x90f98000,
1426 0xd0fc80f9,
1427 0x21f4e0fc,
1428 0x60e7f13f,
1429 0x10e3f005,
1430 0x0000d7f1,
1431 0x8000d3f1,
1432 0xf100dc90,
1433 0xf08480b7,
1434 0x21f41eb3,
1435 0x0057f1a4,
1436 0xff97f100,
1437 0x0093f1ff,
1438/* 0x0731: memx_func_train_loop_4x */
1439 0x80a7f183,
1440 0x10a3f000,
1441 0xf402aeb9,
1442 0xd8b90421,
1443 0xdfb7f102,
1444 0xffb3f1ff,
1445 0x048bfdff,
1446 0x80f9a0f9,
1447 0xe0fcd0fc,
1448 0xf13f21f4,
1449 0xf0053ca7,
1450 0x87f110a3,
1451 0x83f13002,
1452 0xa0f98000,
1453 0xd0fc80f9,
1454 0x21f4e0fc,
1455 0x60e7f13f,
1456 0x10e3f005,
1457 0x0000d7f1,
1458 0x8000d3f1,
1459 0xf102dcb9,
1460 0xf02710b7,
1461 0x21f400b3,
1462 0x02eeb9a4,
1463 0xb90421f4,
1464 0x9dff02dd,
1465 0x0150b694,
1466 0xf4045670,
1467 0x7aa0921e,
1468 0xa9800bcc,
1469 0x0160b600,
1470 0x700470b6,
1471 0x1ef51066,
1472 0x50fcff00,
1470 0x700150b6, 1473 0x700150b6,
1471 0x1ef40456, 1474 0x1ef50756,
1472 0xcc7aa092, 1475 0x00f8fed4,
1473 0x00a9800b, 1476/* 0x07c4: memx_exec */
1474 0xb60160b6, 1477 0xd0f9e0f9,
1475 0x66700470, 1478 0xb902c1b9,
1476 0x001ef510, 1479/* 0x07ce: memx_exec_next */
1477 0xb650fcff, 1480 0x139802b2,
1478 0x56700150, 1481 0x0410b600,
1479 0xd41ef507, 1482 0x01f034e7,
1480/* 0x07cf: memx_exec */ 1483 0x01e033e7,
1481 0xf900f8fe, 1484 0xf00132b6,
1482 0xb9d0f9e0, 1485 0x35980c30,
1483 0xb2b902c1, 1486 0xb855f9de,
1484/* 0x07d9: memx_exec_next */ 1487 0x1ef40612,
1485 0x00139802, 1488 0xf10b98e4,
1486 0xe70410b6, 1489 0xbbf20c98,
1487 0xe701f034, 1490 0xb7f102cb,
1488 0xb601e033, 1491 0xb4b607c4,
1489 0x30f00132, 1492 0x00bbcf06,
1490 0xde35980c, 1493 0xe0fcd0fc,
1491 0x12b855f9, 1494 0x033721f5,
1492 0xe41ef406, 1495/* 0x080a: memx_info */
1493 0x98f10b98, 1496 0xc67000f8,
1494 0xcbbbf20c, 1497 0x0e0bf401,
1495 0xc4b7f102, 1498/* 0x0810: memx_info_data */
1496 0x06b4b607, 1499 0x03ccc7f1,
1497 0xfc00bbcf, 1500 0x0800b7f1,
1498 0xf5e0fcd0, 1501/* 0x081b: memx_info_train */
1499 0xf8034221, 1502 0xf10b0ef4,
1500/* 0x0815: memx_info */ 1503 0xf10bccc7,
1501 0x01c67000, 1504/* 0x0823: memx_info_send */
1502/* 0x081b: memx_info_data */ 1505 0xf50100b7,
1503 0xf10e0bf4, 1506 0xf8033721,
1504 0xf103ccc7, 1507/* 0x0829: memx_recv */
1505 0xf40800b7, 1508 0x01d6b000,
1506/* 0x0826: memx_info_train */ 1509 0xb0980bf4,
1507 0xc7f10b0e, 1510 0x0bf400d6,
1508 0xb7f10bcc, 1511/* 0x0837: memx_init */
1509/* 0x082e: memx_info_send */ 1512 0xf800f8d8,
1510 0x21f50100, 1513/* 0x0839: perf_recv */
1511 0x00f80342, 1514/* 0x083b: perf_init */
1512/* 0x0834: memx_recv */ 1515 0xf800f800,
1513 0xf401d6b0, 1516/* 0x083d: i2c_drive_scl */
1514 0xd6b0980b, 1517 0x0036b000,
1515 0xd80bf400, 1518 0xf1110bf4,
1516/* 0x0842: memx_init */ 1519 0xb607e007,
1517 0x00f800f8, 1520 0x01d00604,
1518/* 0x0844: perf_recv */ 1521 0xf804bd00,
1519/* 0x0846: perf_init */ 1522/* 0x0851: i2c_drive_scl_lo */
1520 0x00f800f8, 1523 0xe407f100,
1521/* 0x0848: i2c_drive_scl */ 1524 0x0604b607,
1522 0xf40036b0, 1525 0xbd0001d0,
1523 0x07f1110b, 1526/* 0x085f: i2c_drive_sda */
1524 0x04b607e0, 1527 0xb000f804,
1525 0x0001d006, 1528 0x0bf40036,
1526 0x00f804bd, 1529 0xe007f111,
1527/* 0x085c: i2c_drive_scl_lo */ 1530 0x0604b607,
1528 0x07e407f1, 1531 0xbd0002d0,
1529 0xd00604b6, 1532/* 0x0873: i2c_drive_sda_lo */
1530 0x04bd0001, 1533 0xf100f804,
1531/* 0x086a: i2c_drive_sda */ 1534 0xb607e407,
1532 0x36b000f8, 1535 0x02d00604,
1533 0x110bf400, 1536 0xf804bd00,
1534 0x07e007f1, 1537/* 0x0881: i2c_sense_scl */
1535 0xd00604b6, 1538 0x0132f400,
1536 0x04bd0002, 1539 0x07c437f1,
1537/* 0x087e: i2c_drive_sda_lo */ 1540 0xcf0634b6,
1538 0x07f100f8, 1541 0x31fd0033,
1539 0x04b607e4, 1542 0x060bf404,
1540 0x0002d006, 1543/* 0x0897: i2c_sense_scl_done */
1541 0x00f804bd, 1544 0xf80131f4,
1542/* 0x088c: i2c_sense_scl */ 1545/* 0x0899: i2c_sense_sda */
1543 0xf10132f4, 1546 0x0132f400,
1544 0xb607c437, 1547 0x07c437f1,
1545 0x33cf0634, 1548 0xcf0634b6,
1546 0x0431fd00, 1549 0x32fd0033,
1547 0xf4060bf4, 1550 0x060bf404,
1548/* 0x08a2: i2c_sense_scl_done */ 1551/* 0x08af: i2c_sense_sda_done */
1549 0x00f80131, 1552 0xf80131f4,
1550/* 0x08a4: i2c_sense_sda */ 1553/* 0x08b1: i2c_raise_scl */
1551 0xf10132f4, 1554 0xf140f900,
1552 0xb607c437, 1555 0xf0089847,
1553 0x33cf0634, 1556 0x21f50137,
1554 0x0432fd00, 1557/* 0x08be: i2c_raise_scl_wait */
1555 0xf4060bf4, 1558 0xe7f1083d,
1556/* 0x08ba: i2c_sense_sda_done */ 1559 0x21f403e8,
1557 0x00f80131, 1560 0x8121f57f,
1558/* 0x08bc: i2c_raise_scl */ 1561 0x0901f408,
1559 0x47f140f9, 1562 0xf40142b6,
1560 0x37f00898, 1563/* 0x08d2: i2c_raise_scl_done */
1561 0x4821f501, 1564 0x40fcef1b,
1562/* 0x08c9: i2c_raise_scl_wait */ 1565/* 0x08d6: i2c_start */
1566 0x21f500f8,
1567 0x11f40881,
1568 0x9921f50d,
1569 0x0611f408,
1570/* 0x08e7: i2c_start_rep */
1571 0xf0300ef4,
1572 0x21f50037,
1573 0x37f0083d,
1574 0x5f21f501,
1575 0x0076bb08,
1576 0xf90465b6,
1577 0x04659450,
1578 0xbd0256bb,
1579 0x0475fd50,
1580 0x21f550fc,
1581 0x64b608b1,
1582 0x1f11f404,
1583/* 0x0914: i2c_start_send */
1584 0xf50037f0,
1585 0xf1085f21,
1586 0xf41388e7,
1587 0x37f07f21,
1588 0x3d21f500,
1589 0x88e7f108,
1590 0x7f21f413,
1591/* 0x0930: i2c_start_out */
1592/* 0x0932: i2c_stop */
1593 0x37f000f8,
1594 0x3d21f500,
1595 0x0037f008,
1596 0x085f21f5,
1597 0x03e8e7f1,
1598 0xf07f21f4,
1599 0x21f50137,
1600 0xe7f1083d,
1601 0x21f41388,
1602 0x0137f07f,
1603 0x085f21f5,
1604 0x1388e7f1,
1605 0xf87f21f4,
1606/* 0x0965: i2c_bitw */
1607 0x5f21f500,
1563 0xe8e7f108, 1608 0xe8e7f108,
1564 0x7f21f403, 1609 0x7f21f403,
1565 0x088c21f5,
1566 0xb60901f4,
1567 0x1bf40142,
1568/* 0x08dd: i2c_raise_scl_done */
1569 0xf840fcef,
1570/* 0x08e1: i2c_start */
1571 0x8c21f500,
1572 0x0d11f408,
1573 0x08a421f5,
1574 0xf40611f4,
1575/* 0x08f2: i2c_start_rep */
1576 0x37f0300e,
1577 0x4821f500,
1578 0x0137f008,
1579 0x086a21f5,
1580 0xb60076bb, 1610 0xb60076bb,
1581 0x50f90465, 1611 0x50f90465,
1582 0xbb046594, 1612 0xbb046594,
1583 0x50bd0256, 1613 0x50bd0256,
1584 0xfc0475fd, 1614 0xfc0475fd,
1585 0xbc21f550, 1615 0xb121f550,
1586 0x0464b608, 1616 0x0464b608,
1587/* 0x091f: i2c_start_send */ 1617 0xf11811f4,
1588 0xf01f11f4, 1618 0xf41388e7,
1589 0x21f50037,
1590 0xe7f1086a,
1591 0x21f41388,
1592 0x0037f07f,
1593 0x084821f5,
1594 0x1388e7f1,
1595/* 0x093b: i2c_start_out */
1596 0xf87f21f4,
1597/* 0x093d: i2c_stop */
1598 0x0037f000,
1599 0x084821f5,
1600 0xf50037f0,
1601 0xf1086a21,
1602 0xf403e8e7,
1603 0x37f07f21, 1619 0x37f07f21,
1604 0x4821f501, 1620 0x3d21f500,
1605 0x88e7f108, 1621 0x88e7f108,
1606 0x7f21f413, 1622 0x7f21f413,
1607 0xf50137f0, 1623/* 0x09a4: i2c_bitw_out */
1608 0xf1086a21, 1624/* 0x09a6: i2c_bitr */
1609 0xf41388e7, 1625 0x37f000f8,
1610 0x00f87f21, 1626 0x5f21f501,
1611/* 0x0970: i2c_bitw */ 1627 0xe8e7f108,
1612 0x086a21f5, 1628 0x7f21f403,
1613 0x03e8e7f1, 1629 0xb60076bb,
1614 0xbb7f21f4, 1630 0x50f90465,
1615 0x65b60076, 1631 0xbb046594,
1616 0x9450f904, 1632 0x50bd0256,
1617 0x56bb0465, 1633 0xfc0475fd,
1618 0xfd50bd02, 1634 0xb121f550,
1619 0x50fc0475, 1635 0x0464b608,
1620 0x08bc21f5, 1636 0xf51b11f4,
1621 0xf40464b6, 1637 0xf0089921,
1622 0xe7f11811, 1638 0x21f50037,
1639 0xe7f1083d,
1623 0x21f41388, 1640 0x21f41388,
1624 0x0037f07f, 1641 0x013cf07f,
1625 0x084821f5, 1642/* 0x09eb: i2c_bitr_done */
1626 0x1388e7f1, 1643 0xf80131f4,
1627/* 0x09af: i2c_bitw_out */ 1644/* 0x09ed: i2c_get_byte */
1628 0xf87f21f4, 1645 0x0057f000,
1629/* 0x09b1: i2c_bitr */ 1646/* 0x09f3: i2c_get_byte_next */
1630 0x0137f000, 1647 0xb60847f0,
1631 0x086a21f5, 1648 0x76bb0154,
1632 0x03e8e7f1, 1649 0x0465b600,
1633 0xbb7f21f4, 1650 0x659450f9,
1634 0x65b60076, 1651 0x0256bb04,
1635 0x9450f904, 1652 0x75fd50bd,
1636 0x56bb0465, 1653 0xf550fc04,
1637 0xfd50bd02, 1654 0xb609a621,
1638 0x50fc0475, 1655 0x11f40464,
1639 0x08bc21f5, 1656 0x0553fd2b,
1640 0xf40464b6, 1657 0xf40142b6,
1641 0x21f51b11, 1658 0x37f0d81b,
1642 0x37f008a4,
1643 0x4821f500,
1644 0x88e7f108,
1645 0x7f21f413,
1646 0xf4013cf0,
1647/* 0x09f6: i2c_bitr_done */
1648 0x00f80131,
1649/* 0x09f8: i2c_get_byte */
1650 0xf00057f0,
1651/* 0x09fe: i2c_get_byte_next */
1652 0x54b60847,
1653 0x0076bb01, 1659 0x0076bb01,
1654 0xf90465b6, 1660 0xf90465b6,
1655 0x04659450, 1661 0x04659450,
1656 0xbd0256bb, 1662 0xbd0256bb,
1657 0x0475fd50, 1663 0x0475fd50,
1658 0x21f550fc, 1664 0x21f550fc,
1659 0x64b609b1, 1665 0x64b60965,
1660 0x2b11f404, 1666/* 0x0a3d: i2c_get_byte_done */
1661 0xb60553fd, 1667/* 0x0a3f: i2c_put_byte */
1662 0x1bf40142, 1668 0xf000f804,
1663 0x0137f0d8, 1669/* 0x0a42: i2c_put_byte_next */
1664 0xb60076bb, 1670 0x42b60847,
1665 0x50f90465, 1671 0x3854ff01,
1666 0xbb046594,
1667 0x50bd0256,
1668 0xfc0475fd,
1669 0x7021f550,
1670 0x0464b609,
1671/* 0x0a48: i2c_get_byte_done */
1672/* 0x0a4a: i2c_put_byte */
1673 0x47f000f8,
1674/* 0x0a4d: i2c_put_byte_next */
1675 0x0142b608,
1676 0xbb3854ff,
1677 0x65b60076,
1678 0x9450f904,
1679 0x56bb0465,
1680 0xfd50bd02,
1681 0x50fc0475,
1682 0x097021f5,
1683 0xf40464b6,
1684 0x46b03411,
1685 0xd81bf400,
1686 0xb60076bb, 1672 0xb60076bb,
1687 0x50f90465, 1673 0x50f90465,
1688 0xbb046594, 1674 0xbb046594,
1689 0x50bd0256, 1675 0x50bd0256,
1690 0xfc0475fd, 1676 0xfc0475fd,
1691 0xb121f550, 1677 0x6521f550,
1692 0x0464b609, 1678 0x0464b609,
1693 0xbb0f11f4, 1679 0xb03411f4,
1694 0x36b00076, 1680 0x1bf40046,
1695 0x061bf401, 1681 0x0076bbd8,
1696/* 0x0aa3: i2c_put_byte_done */
1697 0xf80132f4,
1698/* 0x0aa5: i2c_addr */
1699 0x0076bb00,
1700 0xf90465b6, 1682 0xf90465b6,
1701 0x04659450, 1683 0x04659450,
1702 0xbd0256bb, 1684 0xbd0256bb,
1703 0x0475fd50, 1685 0x0475fd50,
1704 0x21f550fc, 1686 0x21f550fc,
1705 0x64b608e1, 1687 0x64b609a6,
1706 0x2911f404, 1688 0x0f11f404,
1707 0x012ec3e7, 1689 0xb00076bb,
1708 0xfd0134b6, 1690 0x1bf40136,
1709 0x76bb0553, 1691 0x0132f406,
1692/* 0x0a98: i2c_put_byte_done */
1693/* 0x0a9a: i2c_addr */
1694 0x76bb00f8,
1710 0x0465b600, 1695 0x0465b600,
1711 0x659450f9, 1696 0x659450f9,
1712 0x0256bb04, 1697 0x0256bb04,
1713 0x75fd50bd, 1698 0x75fd50bd,
1714 0xf550fc04, 1699 0xf550fc04,
1715 0xb60a4a21, 1700 0xb608d621,
1716/* 0x0aea: i2c_addr_done */ 1701 0x11f40464,
1717 0x00f80464, 1702 0x2ec3e729,
1718/* 0x0aec: i2c_acquire_addr */ 1703 0x0134b601,
1719 0xb6f8cec7, 1704 0xbb0553fd,
1720 0xe0b702e4,
1721 0xee980d1c,
1722/* 0x0afb: i2c_acquire */
1723 0xf500f800,
1724 0xf40aec21,
1725 0xd9f00421,
1726 0x3f21f403,
1727/* 0x0b0a: i2c_release */
1728 0x21f500f8,
1729 0x21f40aec,
1730 0x03daf004,
1731 0xf83f21f4,
1732/* 0x0b19: i2c_recv */
1733 0x0132f400,
1734 0xb6f8c1c7,
1735 0x16b00214,
1736 0x3a1ff528,
1737 0xf413a001,
1738 0x0032980c,
1739 0x0ccc13a0,
1740 0xf4003198,
1741 0xd0f90231,
1742 0xd0f9e0f9,
1743 0x000067f1,
1744 0x100063f1,
1745 0xbb016792,
1746 0x65b60076, 1705 0x65b60076,
1747 0x9450f904, 1706 0x9450f904,
1748 0x56bb0465, 1707 0x56bb0465,
1749 0xfd50bd02, 1708 0xfd50bd02,
1750 0x50fc0475, 1709 0x50fc0475,
1751 0x0afb21f5, 1710 0x0a3f21f5,
1752 0xfc0464b6, 1711/* 0x0adf: i2c_addr_done */
1753 0x00d6b0d0, 1712 0xf80464b6,
1754 0x00b31bf5, 1713/* 0x0ae1: i2c_acquire_addr */
1755 0xbb0057f0, 1714 0xf8cec700,
1715 0xb702e4b6,
1716 0x980d1ce0,
1717 0x00f800ee,
1718/* 0x0af0: i2c_acquire */
1719 0x0ae121f5,
1720 0xf00421f4,
1721 0x21f403d9,
1722/* 0x0aff: i2c_release */
1723 0xf500f83f,
1724 0xf40ae121,
1725 0xdaf00421,
1726 0x3f21f403,
1727/* 0x0b0e: i2c_recv */
1728 0x32f400f8,
1729 0xf8c1c701,
1730 0xb00214b6,
1731 0x1ff52816,
1732 0x13a0013a,
1733 0x32980cf4,
1734 0xcc13a000,
1735 0x0031980c,
1736 0xf90231f4,
1737 0xf9e0f9d0,
1738 0x0067f1d0,
1739 0x0063f100,
1740 0x01679210,
1741 0xb60076bb,
1742 0x50f90465,
1743 0xbb046594,
1744 0x50bd0256,
1745 0xfc0475fd,
1746 0xf021f550,
1747 0x0464b60a,
1748 0xd6b0d0fc,
1749 0xb31bf500,
1750 0x0057f000,
1751 0xb60076bb,
1752 0x50f90465,
1753 0xbb046594,
1754 0x50bd0256,
1755 0xfc0475fd,
1756 0x9a21f550,
1757 0x0464b60a,
1758 0x00d011f5,
1759 0xbbe0c5c7,
1756 0x65b60076, 1760 0x65b60076,
1757 0x9450f904, 1761 0x9450f904,
1758 0x56bb0465, 1762 0x56bb0465,
1759 0xfd50bd02, 1763 0xfd50bd02,
1760 0x50fc0475, 1764 0x50fc0475,
1761 0x0aa521f5, 1765 0x0a3f21f5,
1762 0xf50464b6, 1766 0xf50464b6,
1763 0xc700d011, 1767 0xf000ad11,
1764 0x76bbe0c5, 1768 0x76bb0157,
1765 0x0465b600, 1769 0x0465b600,
1766 0x659450f9, 1770 0x659450f9,
1767 0x0256bb04, 1771 0x0256bb04,
1768 0x75fd50bd, 1772 0x75fd50bd,
1769 0xf550fc04, 1773 0xf550fc04,
1770 0xb60a4a21, 1774 0xb60a9a21,
1771 0x11f50464, 1775 0x11f50464,
1772 0x57f000ad, 1776 0x76bb008a,
1773 0x0076bb01, 1777 0x0465b600,
1774 0xf90465b6, 1778 0x659450f9,
1775 0x04659450, 1779 0x0256bb04,
1776 0xbd0256bb, 1780 0x75fd50bd,
1777 0x0475fd50, 1781 0xf550fc04,
1778 0x21f550fc, 1782 0xb609ed21,
1779 0x64b60aa5, 1783 0x11f40464,
1780 0x8a11f504, 1784 0xe05bcb6a,
1781 0x0076bb00, 1785 0xb60076bb,
1782 0xf90465b6, 1786 0x50f90465,
1783 0x04659450, 1787 0xbb046594,
1784 0xbd0256bb, 1788 0x50bd0256,
1785 0x0475fd50, 1789 0xfc0475fd,
1786 0x21f550fc, 1790 0x3221f550,
1787 0x64b609f8, 1791 0x0464b609,
1788 0x6a11f404, 1792 0xbd025bb9,
1789 0xbbe05bcb, 1793 0x430ef474,
1790 0x65b60076, 1794/* 0x0c14: i2c_recv_not_rd08 */
1791 0x9450f904, 1795 0xf401d6b0,
1792 0x56bb0465, 1796 0x57f03d1b,
1793 0xfd50bd02, 1797 0x9a21f500,
1794 0x50fc0475, 1798 0x3311f40a,
1795 0x093d21f5, 1799 0xf5e0c5c7,
1796 0xb90464b6, 1800 0xf40a3f21,
1797 0x74bd025b, 1801 0x57f02911,
1798/* 0x0c1f: i2c_recv_not_rd08 */ 1802 0x9a21f500,
1799 0xb0430ef4, 1803 0x1f11f40a,
1800 0x1bf401d6, 1804 0xf5e0b5c7,
1801 0x0057f03d, 1805 0xf40a3f21,
1802 0x0aa521f5, 1806 0x21f51511,
1803 0xc73311f4, 1807 0x74bd0932,
1804 0x21f5e0c5, 1808 0xf408c5c7,
1805 0x11f40a4a, 1809 0x32f4091b,
1806 0x0057f029, 1810 0x030ef402,
1807 0x0aa521f5, 1811/* 0x0c54: i2c_recv_not_wr08 */
1808 0xc71f11f4, 1812/* 0x0c54: i2c_recv_done */
1809 0x21f5e0b5, 1813 0xf5f8cec7,
1810 0x11f40a4a, 1814 0xfc0aff21,
1811 0x3d21f515, 1815 0xf4d0fce0,
1812 0xc774bd09, 1816 0x7cb90a12,
1813 0x1bf408c5, 1817 0x3721f502,
1814 0x0232f409, 1818/* 0x0c69: i2c_recv_exit */
1815/* 0x0c5f: i2c_recv_not_wr08 */ 1819/* 0x0c6b: i2c_init */
1816/* 0x0c5f: i2c_recv_done */ 1820 0xf800f803,
1817 0xc7030ef4, 1821/* 0x0c6d: test_recv */
1818 0x21f5f8ce, 1822 0xd817f100,
1819 0xe0fc0b0a, 1823 0x0614b605,
1820 0x12f4d0fc, 1824 0xb60011cf,
1821 0x027cb90a, 1825 0x07f10110,
1822 0x034221f5, 1826 0x04b605d8,
1823/* 0x0c74: i2c_recv_exit */ 1827 0x0001d006,
1824/* 0x0c76: i2c_init */ 1828 0xe7f104bd,
1829 0xe3f1d900,
1830 0x21f5134f,
1831 0x00f80257,
1832/* 0x0c94: test_init */
1833 0x0800e7f1,
1834 0x025721f5,
1835/* 0x0c9e: idle_recv */
1825 0x00f800f8, 1836 0x00f800f8,
1826/* 0x0c78: test_recv */ 1837/* 0x0ca0: idle */
1827 0x05d817f1, 1838 0xf10031f4,
1828 0xcf0614b6, 1839 0xb605d417,
1829 0x10b60011, 1840 0x11cf0614,
1830 0xd807f101, 1841 0x0110b600,
1831 0x0604b605, 1842 0x05d407f1,
1832 0xbd0001d0, 1843 0xd00604b6,
1833 0x00e7f104, 1844 0x04bd0001,
1834 0x4fe3f1d9, 1845/* 0x0cbc: idle_loop */
1835 0x6221f513, 1846 0xf45817f0,
1836/* 0x0c9f: test_init */ 1847/* 0x0cc2: idle_proc */
1837 0xf100f802, 1848/* 0x0cc2: idle_proc_exec */
1838 0xf50800e7, 1849 0x10f90232,
1839 0xf8026221, 1850 0xf5021eb9,
1840/* 0x0ca9: idle_recv */ 1851 0xfc034021,
1841/* 0x0cab: idle */ 1852 0x0911f410,
1842 0xf400f800, 1853 0xf40231f4,
1843 0x17f10031, 1854/* 0x0cd6: idle_proc_next */
1844 0x14b605d4, 1855 0x10b6ef0e,
1845 0x0011cf06, 1856 0x061fb858,
1846 0xf10110b6, 1857 0xf4e61bf4,
1847 0xb605d407, 1858 0x28f4dd02,
1848 0x01d00604, 1859 0xbb0ef400,
1849/* 0x0cc7: idle_loop */ 1860 0x00000000,
1850 0xf004bd00, 1861 0x00000000,
1851 0x32f45817, 1862 0x00000000,
1852/* 0x0ccd: idle_proc */
1853/* 0x0ccd: idle_proc_exec */
1854 0xb910f902,
1855 0x21f5021e,
1856 0x10fc034b,
1857 0xf40911f4,
1858 0x0ef40231,
1859/* 0x0ce1: idle_proc_next */
1860 0x5810b6ef,
1861 0xf4061fb8,
1862 0x02f4e61b,
1863 0x0028f4dd,
1864 0x00bb0ef4,
1865 0x00000000, 1863 0x00000000,
1866 0x00000000, 1864 0x00000000,
1867 0x00000000, 1865 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
index 5cf5be63cbef..ad35fa57be94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
@@ -225,17 +225,11 @@ intr:
225 nv_iowr(NV_PPWR_SUBINTR, $r9) 225 nv_iowr(NV_PPWR_SUBINTR, $r9)
226 226
227 intr_skip_subintr: 227 intr_skip_subintr:
228 and $r9 $r8 NV_PPWR_INTR_PAUSE 228 mov $r9 (NV_PPWR_INTR_USER0 | NV_PPWR_INTR_USER1 | NV_PPWR_INTR_PAUSE)
229 bra z #intr_skip_pause 229 not b32 $r9
230 and $r10 0xffbf 230 and $r8 $r9
231
232 intr_skip_pause:
233 and $r9 $r8 NV_PPWR_INTR_USER0
234 bra z #intr_skip_user0
235 and $r10 0xffbf
236
237 intr_skip_user0:
238 nv_iowr(NV_PPWR_INTR_ACK, $r8) 231 nv_iowr(NV_PPWR_INTR_ACK, $r8)
232
239 pop $r8 233 pop $r8
240 mov $flags $r8 234 mov $flags $r8
241 pop $r15 235 pop $r15
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index d942fa7b9f18..86f9f3b13f71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
81 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); 81 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
82 nvkm_rd32(device, 0x000200); 82 nvkm_rd32(device, 0x000200);
83 83
84 if ( nvkm_boolopt(device->cfgopt, "War00C800_0", 84 if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) {
85 device->quirk ? device->quirk->War00C800_0 : false)) {
86 nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
87 switch (device->chipset) { 85 switch (device->chipset) {
88 case 0xe4: 86 case 0xe4:
89 magic(device, 0x04000000); 87 magic(device, 0x04000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 6326fdc5a48d..2c92ffb5f9d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -107,7 +107,7 @@ nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
107{ 107{
108 struct nvkm_subdev *subdev = &therm->subdev; 108 struct nvkm_subdev *subdev = &therm->subdev;
109 struct nvkm_device *device = subdev->device; 109 struct nvkm_device *device = subdev->device;
110 u32 mask = enable ? 0x80000000 : 0x0000000; 110 u32 mask = enable ? 0x80000000 : 0x00000000;
111 if (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask); 111 if (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask);
112 else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask); 112 else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask);
113 else { 113 else {
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index 778372b062ad..368c1ec6805a 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -12,10 +12,11 @@ omapdrm-y := omap_drv.o \
12 omap_encoder.o \ 12 omap_encoder.o \
13 omap_connector.o \ 13 omap_connector.o \
14 omap_fb.o \ 14 omap_fb.o \
15 omap_fbdev.o \
16 omap_gem.o \ 15 omap_gem.o \
17 omap_gem_dmabuf.o \ 16 omap_gem_dmabuf.o \
18 omap_dmm_tiler.o \ 17 omap_dmm_tiler.o \
19 tcm-sita.o 18 tcm-sita.o
20 19
20omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
21
21obj-$(CONFIG_DRM_OMAP) += omapdrm.o 22obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index ad09590e8a46..2ed0754ed19e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -524,7 +524,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
524 omap_crtc->mgr = omap_dss_get_overlay_manager(channel); 524 omap_crtc->mgr = omap_dss_get_overlay_manager(channel);
525 525
526 ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, 526 ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
527 &omap_crtc_funcs); 527 &omap_crtc_funcs, NULL);
528 if (ret < 0) { 528 if (ret < 0) {
529 kfree(omap_crtc); 529 kfree(omap_crtc);
530 return NULL; 530 return NULL;
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index ee91a25127f9..6f5fc14fc015 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -51,6 +51,7 @@ static int mm_show(struct seq_file *m, void *arg)
51 return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); 51 return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
52} 52}
53 53
54#ifdef CONFIG_DRM_FBDEV_EMULATION
54static int fb_show(struct seq_file *m, void *arg) 55static int fb_show(struct seq_file *m, void *arg)
55{ 56{
56 struct drm_info_node *node = (struct drm_info_node *) m->private; 57 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -73,12 +74,15 @@ static int fb_show(struct seq_file *m, void *arg)
73 74
74 return 0; 75 return 0;
75} 76}
77#endif
76 78
77/* list of debufs files that are applicable to all devices */ 79/* list of debufs files that are applicable to all devices */
78static struct drm_info_list omap_debugfs_list[] = { 80static struct drm_info_list omap_debugfs_list[] = {
79 {"gem", gem_show, 0}, 81 {"gem", gem_show, 0},
80 {"mm", mm_show, 0}, 82 {"mm", mm_show, 0},
83#ifdef CONFIG_DRM_FBDEV_EMULATION
81 {"fb", fb_show, 0}, 84 {"fb", fb_show, 0},
85#endif
82}; 86};
83 87
84/* list of debugfs files that are specific to devices with dmm/tiler */ 88/* list of debugfs files that are specific to devices with dmm/tiler */
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 7841970de48d..dfebdc4aa0f2 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -363,6 +363,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
363 u32 min_align = 128; 363 u32 min_align = 128;
364 int ret; 364 int ret;
365 unsigned long flags; 365 unsigned long flags;
366 size_t slot_bytes;
366 367
367 BUG_ON(!validfmt(fmt)); 368 BUG_ON(!validfmt(fmt));
368 369
@@ -371,13 +372,15 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
371 h = DIV_ROUND_UP(h, geom[fmt].slot_h); 372 h = DIV_ROUND_UP(h, geom[fmt].slot_h);
372 373
373 /* convert alignment to slots */ 374 /* convert alignment to slots */
374 min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp)); 375 slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
375 align = ALIGN(align, min_align); 376 min_align = max(min_align, slot_bytes);
376 align /= geom[fmt].slot_w * geom[fmt].cpp; 377 align = (align > min_align) ? ALIGN(align, min_align) : min_align;
378 align /= slot_bytes;
377 379
378 block->fmt = fmt; 380 block->fmt = fmt;
379 381
380 ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area); 382 ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
383 &block->area);
381 if (ret) { 384 if (ret) {
382 kfree(block); 385 kfree(block);
383 return ERR_PTR(-ENOMEM); 386 return ERR_PTR(-ENOMEM);
@@ -739,8 +742,7 @@ static int omap_dmm_probe(struct platform_device *dev)
739 programming during reill operations */ 742 programming during reill operations */
740 for (i = 0; i < omap_dmm->num_lut; i++) { 743 for (i = 0; i < omap_dmm->num_lut; i++) {
741 omap_dmm->tcm[i] = sita_init(omap_dmm->container_width, 744 omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
742 omap_dmm->container_height, 745 omap_dmm->container_height);
743 NULL);
744 746
745 if (!omap_dmm->tcm[i]) { 747 if (!omap_dmm->tcm[i]) {
746 dev_err(&dev->dev, "failed to allocate container\n"); 748 dev_err(&dev->dev, "failed to allocate container\n");
@@ -1030,4 +1032,3 @@ struct platform_driver omap_dmm_driver = {
1030MODULE_LICENSE("GPL v2"); 1032MODULE_LICENSE("GPL v2");
1031MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>"); 1033MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
1032MODULE_DESCRIPTION("OMAP DMM/Tiler Driver"); 1034MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
1033MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 5c6609cbb6a2..dfafdb602ad2 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -547,14 +547,19 @@ static int ioctl_set_param(struct drm_device *dev, void *data,
547 return 0; 547 return 0;
548} 548}
549 549
550#define OMAP_BO_USER_MASK 0x00ffffff /* flags settable by userspace */
551
550static int ioctl_gem_new(struct drm_device *dev, void *data, 552static int ioctl_gem_new(struct drm_device *dev, void *data,
551 struct drm_file *file_priv) 553 struct drm_file *file_priv)
552{ 554{
553 struct drm_omap_gem_new *args = data; 555 struct drm_omap_gem_new *args = data;
556 u32 flags = args->flags & OMAP_BO_USER_MASK;
557
554 VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv, 558 VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
555 args->size.bytes, args->flags); 559 args->size.bytes, flags);
556 return omap_gem_new_handle(dev, file_priv, args->size, 560
557 args->flags, &args->handle); 561 return omap_gem_new_handle(dev, file_priv, args->size, flags,
562 &args->handle);
558} 563}
559 564
560static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data, 565static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
@@ -692,10 +697,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
692 drm_crtc_vblank_off(priv->crtcs[i]); 697 drm_crtc_vblank_off(priv->crtcs[i]);
693 698
694 priv->fbdev = omap_fbdev_init(dev); 699 priv->fbdev = omap_fbdev_init(dev);
695 if (!priv->fbdev) {
696 dev_warn(dev->dev, "omap_fbdev_init failed\n");
697 /* well, limp along without an fbdev.. maybe X11 will work? */
698 }
699 700
700 /* store off drm_device for use in pm ops */ 701 /* store off drm_device for use in pm ops */
701 dev_set_drvdata(dev->dev, dev); 702 dev_set_drvdata(dev->dev, dev);
@@ -831,7 +832,8 @@ static const struct file_operations omapdriver_fops = {
831}; 832};
832 833
833static struct drm_driver omap_drm_driver = { 834static struct drm_driver omap_drm_driver = {
834 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 835 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
836 DRIVER_ATOMIC,
835 .load = dev_load, 837 .load = dev_load,
836 .unload = dev_unload, 838 .unload = dev_unload,
837 .open = dev_open, 839 .open = dev_open,
@@ -928,35 +930,23 @@ static struct platform_driver pdev = {
928 .remove = pdev_remove, 930 .remove = pdev_remove,
929}; 931};
930 932
933static struct platform_driver * const drivers[] = {
934 &omap_dmm_driver,
935 &pdev,
936};
937
931static int __init omap_drm_init(void) 938static int __init omap_drm_init(void)
932{ 939{
933 int r;
934
935 DBG("init"); 940 DBG("init");
936 941
937 r = platform_driver_register(&omap_dmm_driver); 942 return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
938 if (r) {
939 pr_err("DMM driver registration failed\n");
940 return r;
941 }
942
943 r = platform_driver_register(&pdev);
944 if (r) {
945 pr_err("omapdrm driver registration failed\n");
946 platform_driver_unregister(&omap_dmm_driver);
947 return r;
948 }
949
950 return 0;
951} 943}
952 944
953static void __exit omap_drm_fini(void) 945static void __exit omap_drm_fini(void)
954{ 946{
955 DBG("fini"); 947 DBG("fini");
956 948
957 platform_driver_unregister(&pdev); 949 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
958
959 platform_driver_unregister(&omap_dmm_driver);
960} 950}
961 951
962/* need late_initcall() so we load after dss_driver's are loaded */ 952/* need late_initcall() so we load after dss_driver's are loaded */
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 130fca70bfd7..9e0030731c37 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -36,11 +36,7 @@
36 36
37#define MODULE_NAME "omapdrm" 37#define MODULE_NAME "omapdrm"
38 38
39/* max # of mapper-id's that can be assigned.. todo, come up with a better 39struct omap_drm_usergart;
40 * (but still inexpensive) way to store/access per-buffer mapper private
41 * data..
42 */
43#define MAX_MAPPERS 2
44 40
45/* parameters which describe (unrotated) coordinates of scanout within a fb: */ 41/* parameters which describe (unrotated) coordinates of scanout within a fb: */
46struct omap_drm_window { 42struct omap_drm_window {
@@ -97,6 +93,7 @@ struct omap_drm_private {
97 /* list of GEM objects: */ 93 /* list of GEM objects: */
98 struct list_head obj_list; 94 struct list_head obj_list;
99 95
96 struct omap_drm_usergart *usergart;
100 bool has_dmm; 97 bool has_dmm;
101 98
102 /* properties: */ 99 /* properties: */
@@ -138,8 +135,18 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
138void omap_drm_irq_uninstall(struct drm_device *dev); 135void omap_drm_irq_uninstall(struct drm_device *dev);
139int omap_drm_irq_install(struct drm_device *dev); 136int omap_drm_irq_install(struct drm_device *dev);
140 137
138#ifdef CONFIG_DRM_FBDEV_EMULATION
141struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev); 139struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
142void omap_fbdev_free(struct drm_device *dev); 140void omap_fbdev_free(struct drm_device *dev);
141#else
142static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
143{
144 return NULL;
145}
146static inline void omap_fbdev_free(struct drm_device *dev)
147{
148}
149#endif
143 150
144struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc); 151struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
145enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); 152enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 7d9b32a0eb43..61714e9670ae 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -110,8 +110,6 @@ static int omap_encoder_update(struct drm_encoder *encoder,
110 struct omap_dss_driver *dssdrv = dssdev->driver; 110 struct omap_dss_driver *dssdrv = dssdev->driver;
111 int ret; 111 int ret;
112 112
113 dssdev->src->manager = omap_dss_get_overlay_manager(channel);
114
115 if (dssdrv->check_timings) { 113 if (dssdrv->check_timings) {
116 ret = dssdrv->check_timings(dssdev, timings); 114 ret = dssdrv->check_timings(dssdev, timings);
117 } else { 115 } else {
@@ -178,7 +176,7 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
178 encoder = &omap_encoder->base; 176 encoder = &omap_encoder->base;
179 177
180 drm_encoder_init(dev, encoder, &omap_encoder_funcs, 178 drm_encoder_init(dev, encoder, &omap_encoder_funcs,
181 DRM_MODE_ENCODER_TMDS); 179 DRM_MODE_ENCODER_TMDS, NULL);
182 drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs); 180 drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
183 181
184 return encoder; 182 return encoder;
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b8e4cdec28c3..3cb16f0cf381 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
112 dma_addr_t paddr; 112 dma_addr_t paddr;
113 int ret; 113 int ret;
114 114
115 /* only doing ARGB32 since this is what is needed to alpha-blend
116 * with video overlays:
117 */
118 sizes->surface_bpp = 32; 115 sizes->surface_bpp = 32;
119 sizes->surface_depth = 32; 116 sizes->surface_depth = 24;
120 117
121 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, 118 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
122 sizes->surface_height, sizes->surface_bpp, 119 sizes->surface_height, sizes->surface_bpp,
@@ -298,6 +295,10 @@ fini:
298 drm_fb_helper_fini(helper); 295 drm_fb_helper_fini(helper);
299fail: 296fail:
300 kfree(fbdev); 297 kfree(fbdev);
298
299 dev_warn(dev->dev, "omap_fbdev_init failed\n");
300 /* well, limp along without an fbdev.. maybe X11 will work? */
301
301 return NULL; 302 return NULL;
302} 303}
303 304
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 7ed08fdc4c42..984462622291 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -25,24 +25,15 @@
25#include "omap_drv.h" 25#include "omap_drv.h"
26#include "omap_dmm_tiler.h" 26#include "omap_dmm_tiler.h"
27 27
28/* remove these once drm core helpers are merged */
29struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
30void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
31 bool dirty, bool accessed);
32int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
33
34/* 28/*
35 * GEM buffer object implementation. 29 * GEM buffer object implementation.
36 */ 30 */
37 31
38#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
39
40/* note: we use upper 8 bits of flags for driver-internal flags: */ 32/* note: we use upper 8 bits of flags for driver-internal flags: */
41#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ 33#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
42#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ 34#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
43#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ 35#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
44 36
45
46struct omap_gem_object { 37struct omap_gem_object {
47 struct drm_gem_object base; 38 struct drm_gem_object base;
48 39
@@ -119,8 +110,7 @@ struct omap_gem_object {
119 } *sync; 110 } *sync;
120}; 111};
121 112
122static int get_pages(struct drm_gem_object *obj, struct page ***pages); 113#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
123static uint64_t mmap_offset(struct drm_gem_object *obj);
124 114
125/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are 115/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
126 * not necessarily pinned in TILER all the time, and (b) when they are 116 * not necessarily pinned in TILER all the time, and (b) when they are
@@ -134,27 +124,69 @@ static uint64_t mmap_offset(struct drm_gem_object *obj);
134 * for later.. 124 * for later..
135 */ 125 */
136#define NUM_USERGART_ENTRIES 2 126#define NUM_USERGART_ENTRIES 2
137struct usergart_entry { 127struct omap_drm_usergart_entry {
138 struct tiler_block *block; /* the reserved tiler block */ 128 struct tiler_block *block; /* the reserved tiler block */
139 dma_addr_t paddr; 129 dma_addr_t paddr;
140 struct drm_gem_object *obj; /* the current pinned obj */ 130 struct drm_gem_object *obj; /* the current pinned obj */
141 pgoff_t obj_pgoff; /* page offset of obj currently 131 pgoff_t obj_pgoff; /* page offset of obj currently
142 mapped in */ 132 mapped in */
143}; 133};
144static struct { 134
145 struct usergart_entry entry[NUM_USERGART_ENTRIES]; 135struct omap_drm_usergart {
136 struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
146 int height; /* height in rows */ 137 int height; /* height in rows */
147 int height_shift; /* ilog2(height in rows) */ 138 int height_shift; /* ilog2(height in rows) */
148 int slot_shift; /* ilog2(width per slot) */ 139 int slot_shift; /* ilog2(width per slot) */
149 int stride_pfn; /* stride in pages */ 140 int stride_pfn; /* stride in pages */
150 int last; /* index of last used entry */ 141 int last; /* index of last used entry */
151} *usergart; 142};
143
144/* -----------------------------------------------------------------------------
145 * Helpers
146 */
147
148/** get mmap offset */
149static uint64_t mmap_offset(struct drm_gem_object *obj)
150{
151 struct drm_device *dev = obj->dev;
152 int ret;
153 size_t size;
154
155 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
156
157 /* Make it mmapable */
158 size = omap_gem_mmap_size(obj);
159 ret = drm_gem_create_mmap_offset_size(obj, size);
160 if (ret) {
161 dev_err(dev->dev, "could not allocate mmap offset\n");
162 return 0;
163 }
164
165 return drm_vma_node_offset_addr(&obj->vma_node);
166}
167
168/* GEM objects can either be allocated from contiguous memory (in which
169 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
170 * contiguous buffers can be remapped in TILER/DMM if they need to be
171 * contiguous... but we don't do this all the time to reduce pressure
172 * on TILER/DMM space when we know at allocation time that the buffer
173 * will need to be scanned out.
174 */
175static inline bool is_shmem(struct drm_gem_object *obj)
176{
177 return obj->filp != NULL;
178}
179
180/* -----------------------------------------------------------------------------
181 * Eviction
182 */
152 183
153static void evict_entry(struct drm_gem_object *obj, 184static void evict_entry(struct drm_gem_object *obj,
154 enum tiler_fmt fmt, struct usergart_entry *entry) 185 enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
155{ 186{
156 struct omap_gem_object *omap_obj = to_omap_bo(obj); 187 struct omap_gem_object *omap_obj = to_omap_bo(obj);
157 int n = usergart[fmt].height; 188 struct omap_drm_private *priv = obj->dev->dev_private;
189 int n = priv->usergart[fmt].height;
158 size_t size = PAGE_SIZE * n; 190 size_t size = PAGE_SIZE * n;
159 loff_t off = mmap_offset(obj) + 191 loff_t off = mmap_offset(obj) +
160 (entry->obj_pgoff << PAGE_SHIFT); 192 (entry->obj_pgoff << PAGE_SHIFT);
@@ -180,46 +212,25 @@ static void evict_entry(struct drm_gem_object *obj,
180static void evict(struct drm_gem_object *obj) 212static void evict(struct drm_gem_object *obj)
181{ 213{
182 struct omap_gem_object *omap_obj = to_omap_bo(obj); 214 struct omap_gem_object *omap_obj = to_omap_bo(obj);
215 struct omap_drm_private *priv = obj->dev->dev_private;
183 216
184 if (omap_obj->flags & OMAP_BO_TILED) { 217 if (omap_obj->flags & OMAP_BO_TILED) {
185 enum tiler_fmt fmt = gem2fmt(omap_obj->flags); 218 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
186 int i; 219 int i;
187 220
188 if (!usergart)
189 return;
190
191 for (i = 0; i < NUM_USERGART_ENTRIES; i++) { 221 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
192 struct usergart_entry *entry = &usergart[fmt].entry[i]; 222 struct omap_drm_usergart_entry *entry =
223 &priv->usergart[fmt].entry[i];
224
193 if (entry->obj == obj) 225 if (entry->obj == obj)
194 evict_entry(obj, fmt, entry); 226 evict_entry(obj, fmt, entry);
195 } 227 }
196 } 228 }
197} 229}
198 230
199/* GEM objects can either be allocated from contiguous memory (in which 231/* -----------------------------------------------------------------------------
200 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non 232 * Page Management
201 * contiguous buffers can be remapped in TILER/DMM if they need to be
202 * contiguous... but we don't do this all the time to reduce pressure
203 * on TILER/DMM space when we know at allocation time that the buffer
204 * will need to be scanned out.
205 */
206static inline bool is_shmem(struct drm_gem_object *obj)
207{
208 return obj->filp != NULL;
209}
210
211/**
212 * shmem buffers that are mapped cached can simulate coherency via using
213 * page faulting to keep track of dirty pages
214 */ 233 */
215static inline bool is_cached_coherent(struct drm_gem_object *obj)
216{
217 struct omap_gem_object *omap_obj = to_omap_bo(obj);
218 return is_shmem(obj) &&
219 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
220}
221
222static DEFINE_SPINLOCK(sync_lock);
223 234
224/** ensure backing pages are allocated */ 235/** ensure backing pages are allocated */
225static int omap_gem_attach_pages(struct drm_gem_object *obj) 236static int omap_gem_attach_pages(struct drm_gem_object *obj)
@@ -272,6 +283,28 @@ free_pages:
272 return ret; 283 return ret;
273} 284}
274 285
286/* acquire pages when needed (for example, for DMA where physically
287 * contiguous buffer is not required
288 */
289static int get_pages(struct drm_gem_object *obj, struct page ***pages)
290{
291 struct omap_gem_object *omap_obj = to_omap_bo(obj);
292 int ret = 0;
293
294 if (is_shmem(obj) && !omap_obj->pages) {
295 ret = omap_gem_attach_pages(obj);
296 if (ret) {
297 dev_err(obj->dev->dev, "could not attach pages\n");
298 return ret;
299 }
300 }
301
302 /* TODO: even phys-contig.. we should have a list of pages? */
303 *pages = omap_obj->pages;
304
305 return 0;
306}
307
275/** release backing pages */ 308/** release backing pages */
276static void omap_gem_detach_pages(struct drm_gem_object *obj) 309static void omap_gem_detach_pages(struct drm_gem_object *obj)
277{ 310{
@@ -301,26 +334,6 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
301 return to_omap_bo(obj)->flags; 334 return to_omap_bo(obj)->flags;
302} 335}
303 336
304/** get mmap offset */
305static uint64_t mmap_offset(struct drm_gem_object *obj)
306{
307 struct drm_device *dev = obj->dev;
308 int ret;
309 size_t size;
310
311 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
312
313 /* Make it mmapable */
314 size = omap_gem_mmap_size(obj);
315 ret = drm_gem_create_mmap_offset_size(obj, size);
316 if (ret) {
317 dev_err(dev->dev, "could not allocate mmap offset\n");
318 return 0;
319 }
320
321 return drm_vma_node_offset_addr(&obj->vma_node);
322}
323
324uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) 337uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
325{ 338{
326 uint64_t offset; 339 uint64_t offset;
@@ -362,6 +375,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
362 return -EINVAL; 375 return -EINVAL;
363} 376}
364 377
378/* -----------------------------------------------------------------------------
379 * Fault Handling
380 */
381
365/* Normal handling for the case of faulting in non-tiled buffers */ 382/* Normal handling for the case of faulting in non-tiled buffers */
366static int fault_1d(struct drm_gem_object *obj, 383static int fault_1d(struct drm_gem_object *obj,
367 struct vm_area_struct *vma, struct vm_fault *vmf) 384 struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -393,7 +410,8 @@ static int fault_2d(struct drm_gem_object *obj,
393 struct vm_area_struct *vma, struct vm_fault *vmf) 410 struct vm_area_struct *vma, struct vm_fault *vmf)
394{ 411{
395 struct omap_gem_object *omap_obj = to_omap_bo(obj); 412 struct omap_gem_object *omap_obj = to_omap_bo(obj);
396 struct usergart_entry *entry; 413 struct omap_drm_private *priv = obj->dev->dev_private;
414 struct omap_drm_usergart_entry *entry;
397 enum tiler_fmt fmt = gem2fmt(omap_obj->flags); 415 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
398 struct page *pages[64]; /* XXX is this too much to have on stack? */ 416 struct page *pages[64]; /* XXX is this too much to have on stack? */
399 unsigned long pfn; 417 unsigned long pfn;
@@ -406,8 +424,8 @@ static int fault_2d(struct drm_gem_object *obj,
406 * that need to be mapped in to fill 4kb wide CPU page. If the slot 424 * that need to be mapped in to fill 4kb wide CPU page. If the slot
407 * height is 64, then 64 pages fill a 4kb wide by 64 row region. 425 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
408 */ 426 */
409 const int n = usergart[fmt].height; 427 const int n = priv->usergart[fmt].height;
410 const int n_shift = usergart[fmt].height_shift; 428 const int n_shift = priv->usergart[fmt].height_shift;
411 429
412 /* 430 /*
413 * If buffer width in bytes > PAGE_SIZE then the virtual stride is 431 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
@@ -428,11 +446,11 @@ static int fault_2d(struct drm_gem_object *obj,
428 base_pgoff = round_down(pgoff, m << n_shift); 446 base_pgoff = round_down(pgoff, m << n_shift);
429 447
430 /* figure out buffer width in slots */ 448 /* figure out buffer width in slots */
431 slots = omap_obj->width >> usergart[fmt].slot_shift; 449 slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
432 450
433 vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); 451 vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
434 452
435 entry = &usergart[fmt].entry[usergart[fmt].last]; 453 entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
436 454
437 /* evict previous buffer using this usergart entry, if any: */ 455 /* evict previous buffer using this usergart entry, if any: */
438 if (entry->obj) 456 if (entry->obj)
@@ -479,12 +497,13 @@ static int fault_2d(struct drm_gem_object *obj,
479 497
480 for (i = n; i > 0; i--) { 498 for (i = n; i > 0; i--) {
481 vm_insert_mixed(vma, (unsigned long)vaddr, pfn); 499 vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
482 pfn += usergart[fmt].stride_pfn; 500 pfn += priv->usergart[fmt].stride_pfn;
483 vaddr += PAGE_SIZE * m; 501 vaddr += PAGE_SIZE * m;
484 } 502 }
485 503
486 /* simple round-robin: */ 504 /* simple round-robin: */
487 usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES; 505 priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
506 % NUM_USERGART_ENTRIES;
488 507
489 return 0; 508 return 0;
490} 509}
@@ -596,6 +615,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
596 return 0; 615 return 0;
597} 616}
598 617
618/* -----------------------------------------------------------------------------
619 * Dumb Buffers
620 */
599 621
600/** 622/**
601 * omap_gem_dumb_create - create a dumb buffer 623 * omap_gem_dumb_create - create a dumb buffer
@@ -653,6 +675,7 @@ fail:
653 return ret; 675 return ret;
654} 676}
655 677
678#ifdef CONFIG_DRM_FBDEV_EMULATION
656/* Set scrolling position. This allows us to implement fast scrolling 679/* Set scrolling position. This allows us to implement fast scrolling
657 * for console. 680 * for console.
658 * 681 *
@@ -689,6 +712,22 @@ fail:
689 712
690 return ret; 713 return ret;
691} 714}
715#endif
716
717/* -----------------------------------------------------------------------------
718 * Memory Management & DMA Sync
719 */
720
721/**
722 * shmem buffers that are mapped cached can simulate coherency via using
723 * page faulting to keep track of dirty pages
724 */
725static inline bool is_cached_coherent(struct drm_gem_object *obj)
726{
727 struct omap_gem_object *omap_obj = to_omap_bo(obj);
728 return is_shmem(obj) &&
729 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
730}
692 731
693/* Sync the buffer for CPU access.. note pages should already be 732/* Sync the buffer for CPU access.. note pages should already be
694 * attached, ie. omap_gem_get_pages() 733 * attached, ie. omap_gem_get_pages()
@@ -865,28 +904,6 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
865 return ret; 904 return ret;
866} 905}
867 906
868/* acquire pages when needed (for example, for DMA where physically
869 * contiguous buffer is not required
870 */
871static int get_pages(struct drm_gem_object *obj, struct page ***pages)
872{
873 struct omap_gem_object *omap_obj = to_omap_bo(obj);
874 int ret = 0;
875
876 if (is_shmem(obj) && !omap_obj->pages) {
877 ret = omap_gem_attach_pages(obj);
878 if (ret) {
879 dev_err(obj->dev->dev, "could not attach pages\n");
880 return ret;
881 }
882 }
883
884 /* TODO: even phys-contig.. we should have a list of pages? */
885 *pages = omap_obj->pages;
886
887 return 0;
888}
889
890/* if !remap, and we don't have pages backing, then fail, rather than 907/* if !remap, and we don't have pages backing, then fail, rather than
891 * increasing the pin count (which we don't really do yet anyways, 908 * increasing the pin count (which we don't really do yet anyways,
892 * because we don't support swapping pages back out). And 'remap' 909 * because we don't support swapping pages back out). And 'remap'
@@ -924,6 +941,7 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
924 return 0; 941 return 0;
925} 942}
926 943
944#ifdef CONFIG_DRM_FBDEV_EMULATION
927/* Get kernel virtual address for CPU access.. this more or less only 945/* Get kernel virtual address for CPU access.. this more or less only
928 * exists for omap_fbdev. This should be called with struct_mutex 946 * exists for omap_fbdev. This should be called with struct_mutex
929 * held. 947 * held.
@@ -942,6 +960,11 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
942 } 960 }
943 return omap_obj->vaddr; 961 return omap_obj->vaddr;
944} 962}
963#endif
964
965/* -----------------------------------------------------------------------------
966 * Power Management
967 */
945 968
946#ifdef CONFIG_PM 969#ifdef CONFIG_PM
947/* re-pin objects in DMM in resume path: */ 970/* re-pin objects in DMM in resume path: */
@@ -971,6 +994,10 @@ int omap_gem_resume(struct device *dev)
971} 994}
972#endif 995#endif
973 996
997/* -----------------------------------------------------------------------------
998 * DebugFS
999 */
1000
974#ifdef CONFIG_DEBUG_FS 1001#ifdef CONFIG_DEBUG_FS
975void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 1002void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
976{ 1003{
@@ -1017,9 +1044,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1017} 1044}
1018#endif 1045#endif
1019 1046
1020/* Buffer Synchronization: 1047/* -----------------------------------------------------------------------------
1048 * Buffer Synchronization
1021 */ 1049 */
1022 1050
1051static DEFINE_SPINLOCK(sync_lock);
1052
1023struct omap_gem_sync_waiter { 1053struct omap_gem_sync_waiter {
1024 struct list_head list; 1054 struct list_head list;
1025 struct omap_gem_object *omap_obj; 1055 struct omap_gem_object *omap_obj;
@@ -1265,6 +1295,10 @@ unlock:
1265 return ret; 1295 return ret;
1266} 1296}
1267 1297
1298/* -----------------------------------------------------------------------------
1299 * Constructor & Destructor
1300 */
1301
1268/* don't call directly.. called from GEM core when it is time to actually 1302/* don't call directly.. called from GEM core when it is time to actually
1269 * free the object.. 1303 * free the object..
1270 */ 1304 */
@@ -1282,8 +1316,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1282 list_del(&omap_obj->mm_list); 1316 list_del(&omap_obj->mm_list);
1283 spin_unlock(&priv->list_lock); 1317 spin_unlock(&priv->list_lock);
1284 1318
1285 drm_gem_free_mmap_offset(obj);
1286
1287 /* this means the object is still pinned.. which really should 1319 /* this means the object is still pinned.. which really should
1288 * not happen. I think.. 1320 * not happen. I think..
1289 */ 1321 */
@@ -1308,31 +1340,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1308 1340
1309 drm_gem_object_release(obj); 1341 drm_gem_object_release(obj);
1310 1342
1311 kfree(obj); 1343 kfree(omap_obj);
1312}
1313
1314/* convenience method to construct a GEM buffer object, and userspace handle */
1315int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1316 union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
1317{
1318 struct drm_gem_object *obj;
1319 int ret;
1320
1321 obj = omap_gem_new(dev, gsize, flags);
1322 if (!obj)
1323 return -ENOMEM;
1324
1325 ret = drm_gem_handle_create(file, obj, handle);
1326 if (ret) {
1327 drm_gem_object_release(obj);
1328 kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
1329 return ret;
1330 }
1331
1332 /* drop reference from allocate - handle holds it now */
1333 drm_gem_object_unreference_unlocked(obj);
1334
1335 return 0;
1336} 1344}
1337 1345
1338/* GEM buffer object constructor */ 1346/* GEM buffer object constructor */
@@ -1341,15 +1349,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1341{ 1349{
1342 struct omap_drm_private *priv = dev->dev_private; 1350 struct omap_drm_private *priv = dev->dev_private;
1343 struct omap_gem_object *omap_obj; 1351 struct omap_gem_object *omap_obj;
1344 struct drm_gem_object *obj = NULL; 1352 struct drm_gem_object *obj;
1345 struct address_space *mapping; 1353 struct address_space *mapping;
1346 size_t size; 1354 size_t size;
1347 int ret; 1355 int ret;
1348 1356
1349 if (flags & OMAP_BO_TILED) { 1357 if (flags & OMAP_BO_TILED) {
1350 if (!usergart) { 1358 if (!priv->usergart) {
1351 dev_err(dev->dev, "Tiled buffers require DMM\n"); 1359 dev_err(dev->dev, "Tiled buffers require DMM\n");
1352 goto fail; 1360 return NULL;
1353 } 1361 }
1354 1362
1355 /* tiled buffers are always shmem paged backed.. when they are 1363 /* tiled buffers are always shmem paged backed.. when they are
@@ -1420,16 +1428,42 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1420 return obj; 1428 return obj;
1421 1429
1422fail: 1430fail:
1423 if (obj) 1431 omap_gem_free_object(obj);
1432 return NULL;
1433}
1434
1435/* convenience method to construct a GEM buffer object, and userspace handle */
1436int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1437 union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
1438{
1439 struct drm_gem_object *obj;
1440 int ret;
1441
1442 obj = omap_gem_new(dev, gsize, flags);
1443 if (!obj)
1444 return -ENOMEM;
1445
1446 ret = drm_gem_handle_create(file, obj, handle);
1447 if (ret) {
1424 omap_gem_free_object(obj); 1448 omap_gem_free_object(obj);
1449 return ret;
1450 }
1425 1451
1426 return NULL; 1452 /* drop reference from allocate - handle holds it now */
1453 drm_gem_object_unreference_unlocked(obj);
1454
1455 return 0;
1427} 1456}
1428 1457
1429/* init/cleanup.. if DMM is used, we need to set some stuff up.. */ 1458/* -----------------------------------------------------------------------------
1459 * Init & Cleanup
1460 */
1461
1462/* If DMM is used, we need to set some stuff up.. */
1430void omap_gem_init(struct drm_device *dev) 1463void omap_gem_init(struct drm_device *dev)
1431{ 1464{
1432 struct omap_drm_private *priv = dev->dev_private; 1465 struct omap_drm_private *priv = dev->dev_private;
1466 struct omap_drm_usergart *usergart;
1433 const enum tiler_fmt fmts[] = { 1467 const enum tiler_fmt fmts[] = {
1434 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT 1468 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1435 }; 1469 };
@@ -1458,10 +1492,11 @@ void omap_gem_init(struct drm_device *dev)
1458 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; 1492 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1459 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); 1493 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1460 for (j = 0; j < NUM_USERGART_ENTRIES; j++) { 1494 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1461 struct usergart_entry *entry = &usergart[i].entry[j]; 1495 struct omap_drm_usergart_entry *entry;
1462 struct tiler_block *block = 1496 struct tiler_block *block;
1463 tiler_reserve_2d(fmts[i], w, h, 1497
1464 PAGE_SIZE); 1498 entry = &usergart[i].entry[j];
1499 block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1465 if (IS_ERR(block)) { 1500 if (IS_ERR(block)) {
1466 dev_err(dev->dev, 1501 dev_err(dev->dev,
1467 "reserve failed: %d, %d, %ld\n", 1502 "reserve failed: %d, %d, %ld\n",
@@ -1477,13 +1512,16 @@ void omap_gem_init(struct drm_device *dev)
1477 } 1512 }
1478 } 1513 }
1479 1514
1515 priv->usergart = usergart;
1480 priv->has_dmm = true; 1516 priv->has_dmm = true;
1481} 1517}
1482 1518
1483void omap_gem_deinit(struct drm_device *dev) 1519void omap_gem_deinit(struct drm_device *dev)
1484{ 1520{
1521 struct omap_drm_private *priv = dev->dev_private;
1522
1485 /* I believe we can rely on there being no more outstanding GEM 1523 /* I believe we can rely on there being no more outstanding GEM
1486 * objects which could depend on usergart/dmm at this point. 1524 * objects which could depend on usergart/dmm at this point.
1487 */ 1525 */
1488 kfree(usergart); 1526 kfree(priv->usergart);
1489} 1527}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3054bda72688..d75b197eff46 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -188,33 +188,6 @@ static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
188 .atomic_disable = omap_plane_atomic_disable, 188 .atomic_disable = omap_plane_atomic_disable,
189}; 189};
190 190
191static void omap_plane_reset(struct drm_plane *plane)
192{
193 struct omap_plane *omap_plane = to_omap_plane(plane);
194 struct omap_plane_state *omap_state;
195
196 if (plane->state && plane->state->fb)
197 drm_framebuffer_unreference(plane->state->fb);
198
199 kfree(plane->state);
200 plane->state = NULL;
201
202 omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
203 if (omap_state == NULL)
204 return;
205
206 /*
207 * Set defaults depending on whether we are a primary or overlay
208 * plane.
209 */
210 omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
211 ? 0 : omap_plane->id;
212 omap_state->base.rotation = BIT(DRM_ROTATE_0);
213
214 plane->state = &omap_state->base;
215 plane->state->plane = plane;
216}
217
218static void omap_plane_destroy(struct drm_plane *plane) 191static void omap_plane_destroy(struct drm_plane *plane)
219{ 192{
220 struct omap_plane *omap_plane = to_omap_plane(plane); 193 struct omap_plane *omap_plane = to_omap_plane(plane);
@@ -270,6 +243,32 @@ static void omap_plane_atomic_destroy_state(struct drm_plane *plane,
270 kfree(to_omap_plane_state(state)); 243 kfree(to_omap_plane_state(state));
271} 244}
272 245
246static void omap_plane_reset(struct drm_plane *plane)
247{
248 struct omap_plane *omap_plane = to_omap_plane(plane);
249 struct omap_plane_state *omap_state;
250
251 if (plane->state) {
252 omap_plane_atomic_destroy_state(plane, plane->state);
253 plane->state = NULL;
254 }
255
256 omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
257 if (omap_state == NULL)
258 return;
259
260 /*
261 * Set defaults depending on whether we are a primary or overlay
262 * plane.
263 */
264 omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
265 ? 0 : omap_plane->id;
266 omap_state->base.rotation = BIT(DRM_ROTATE_0);
267
268 plane->state = &omap_state->base;
269 plane->state->plane = plane;
270}
271
273static int omap_plane_atomic_set_property(struct drm_plane *plane, 272static int omap_plane_atomic_set_property(struct drm_plane *plane,
274 struct drm_plane_state *state, 273 struct drm_plane_state *state,
275 struct drm_property *property, 274 struct drm_property *property,
@@ -366,7 +365,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
366 365
367 ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, 366 ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
368 &omap_plane_funcs, omap_plane->formats, 367 &omap_plane_funcs, omap_plane->formats,
369 omap_plane->nformats, type); 368 omap_plane->nformats, type, NULL);
370 if (ret < 0) 369 if (ret < 0)
371 goto error; 370 goto error;
372 371
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index efb609510540..c10fdfc0930f 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -5,8 +5,9 @@
5 * 5 *
6 * Authors: Ravi Ramachandra <r.ramachandra@ti.com>, 6 * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
7 * Lajos Molnar <molnar@ti.com> 7 * Lajos Molnar <molnar@ti.com>
8 * Andy Gross <andy.gross@ti.com>
8 * 9 *
9 * Copyright (C) 2009-2010 Texas Instruments, Inc. 10 * Copyright (C) 2012 Texas Instruments, Inc.
10 * 11 *
11 * This package is free software; you can redistribute it and/or modify 12 * This package is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -17,687 +18,244 @@
17 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 18 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 * 19 *
19 */ 20 */
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/wait.h>
26#include <linux/bitmap.h>
20#include <linux/slab.h> 27#include <linux/slab.h>
21#include <linux/spinlock.h> 28#include "tcm.h"
22 29
23#include "tcm-sita.h" 30static unsigned long mask[8];
24 31/*
25#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1)) 32 * pos position in bitmap
26 33 * w width in slots
27/* Individual selection criteria for different scan areas */ 34 * h height in slots
28static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL; 35 * map ptr to bitmap
29static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE; 36 * stride slots in a row
30
31/*********************************************
32 * TCM API - Sita Implementation
33 *********************************************/
34static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
35 struct tcm_area *area);
36static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
37static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
38static void sita_deinit(struct tcm *tcm);
39
40/*********************************************
41 * Main Scanner functions
42 *********************************************/
43static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
44 struct tcm_area *area);
45
46static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
47 struct tcm_area *field, struct tcm_area *area);
48
49static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
50 struct tcm_area *field, struct tcm_area *area);
51
52static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
53 struct tcm_area *field, struct tcm_area *area);
54
55/*********************************************
56 * Support Infrastructure Methods
57 *********************************************/
58static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
59
60static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
61 struct tcm_area *field, s32 criteria,
62 struct score *best);
63
64static void get_nearness_factor(struct tcm_area *field,
65 struct tcm_area *candidate,
66 struct nearness_factor *nf);
67
68static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
69 struct neighbor_stats *stat);
70
71static void fill_area(struct tcm *tcm,
72 struct tcm_area *area, struct tcm_area *parent);
73
74
75/*********************************************/
76
77/*********************************************
78 * Utility Methods
79 *********************************************/
80struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
81{
82 struct tcm *tcm;
83 struct sita_pvt *pvt;
84 struct tcm_area area = {0};
85 s32 i;
86
87 if (width == 0 || height == 0)
88 return NULL;
89
90 tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
91 pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
92 if (!tcm || !pvt)
93 goto error;
94
95 memset(tcm, 0, sizeof(*tcm));
96 memset(pvt, 0, sizeof(*pvt));
97
98 /* Updating the pointers to SiTA implementation APIs */
99 tcm->height = height;
100 tcm->width = width;
101 tcm->reserve_2d = sita_reserve_2d;
102 tcm->reserve_1d = sita_reserve_1d;
103 tcm->free = sita_free;
104 tcm->deinit = sita_deinit;
105 tcm->pvt = (void *)pvt;
106
107 spin_lock_init(&(pvt->lock));
108
109 /* Creating tam map */
110 pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
111 if (!pvt->map)
112 goto error;
113
114 for (i = 0; i < tcm->width; i++) {
115 pvt->map[i] =
116 kmalloc(sizeof(**pvt->map) * tcm->height,
117 GFP_KERNEL);
118 if (pvt->map[i] == NULL) {
119 while (i--)
120 kfree(pvt->map[i]);
121 kfree(pvt->map);
122 goto error;
123 }
124 }
125
126 if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
127 pvt->div_pt.x = attr->x;
128 pvt->div_pt.y = attr->y;
129
130 } else {
131 /* Defaulting to 3:1 ratio on width for 2D area split */
132 /* Defaulting to 3:1 ratio on height for 2D and 1D split */
133 pvt->div_pt.x = (tcm->width * 3) / 4;
134 pvt->div_pt.y = (tcm->height * 3) / 4;
135 }
136
137 spin_lock(&(pvt->lock));
138 assign(&area, 0, 0, width - 1, height - 1);
139 fill_area(tcm, &area, NULL);
140 spin_unlock(&(pvt->lock));
141 return tcm;
142
143error:
144 kfree(tcm);
145 kfree(pvt);
146 return NULL;
147}
148
149static void sita_deinit(struct tcm *tcm)
150{
151 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
152 struct tcm_area area = {0};
153 s32 i;
154
155 area.p1.x = tcm->width - 1;
156 area.p1.y = tcm->height - 1;
157
158 spin_lock(&(pvt->lock));
159 fill_area(tcm, &area, NULL);
160 spin_unlock(&(pvt->lock));
161
162 for (i = 0; i < tcm->height; i++)
163 kfree(pvt->map[i]);
164 kfree(pvt->map);
165 kfree(pvt);
166}
167
168/**
169 * Reserve a 1D area in the container
170 *
171 * @param num_slots size of 1D area
172 * @param area pointer to the area that will be populated with the
173 * reserved area
174 *
175 * @return 0 on success, non-0 error value on failure.
176 */ 37 */
177static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots, 38static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
178 struct tcm_area *area) 39 unsigned long *map, uint16_t stride)
179{ 40{
180 s32 ret; 41 int i;
181 struct tcm_area field = {0};
182 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
183
184 spin_lock(&(pvt->lock));
185
186 /* Scanning entire container */
187 assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
188 42
189 ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area); 43 for (i = 0; i < h; i++, pos += stride)
190 if (!ret) 44 bitmap_clear(map, pos, w);
191 /* update map */
192 fill_area(tcm, area, area);
193
194 spin_unlock(&(pvt->lock));
195 return ret;
196} 45}
197 46
198/** 47/*
199 * Reserve a 2D area in the container 48 * w width in slots
200 * 49 * pos ptr to position
201 * @param w width 50 * map ptr to bitmap
202 * @param h height 51 * num_bits number of bits in bitmap
203 * @param area pointer to the area that will be populated with the reserved
204 * area
205 *
206 * @return 0 on success, non-0 error value on failure.
207 */ 52 */
208static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align, 53static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
209 struct tcm_area *area) 54 size_t num_bits)
210{ 55{
211 s32 ret; 56 unsigned long search_count = 0;
212 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; 57 unsigned long bit;
58 bool area_found = false;
213 59
214 /* not supporting more than 64 as alignment */ 60 *pos = num_bits - w;
215 if (align > 64)
216 return -EINVAL;
217 61
218 /* we prefer 1, 32 and 64 as alignment */ 62 while (search_count < num_bits) {
219 align = align <= 1 ? 1 : align <= 32 ? 32 : 64; 63 bit = find_next_bit(map, num_bits, *pos);
220 64
221 spin_lock(&(pvt->lock)); 65 if (bit - *pos >= w) {
222 ret = scan_areas_and_find_fit(tcm, w, h, align, area); 66 /* found a long enough free area */
223 if (!ret) 67 bitmap_set(map, *pos, w);
224 /* update map */ 68 area_found = true;
225 fill_area(tcm, area, area); 69 break;
70 }
226 71
227 spin_unlock(&(pvt->lock)); 72 search_count = num_bits - bit + w;
228 return ret; 73 *pos = bit - w;
74 }
75
76 return (area_found) ? 0 : -ENOMEM;
229} 77}
230 78
231/** 79/*
232 * Unreserve a previously allocated 2D or 1D area 80 * w = width in slots
233 * @param area area to be freed 81 * h = height in slots
234 * @return 0 - success 82 * a = align in slots (mask, 2^n-1, 0 is unaligned)
83 * offset = offset in bytes from 4KiB
84 * pos = position in bitmap for buffer
85 * map = bitmap ptr
86 * num_bits = size of bitmap
87 * stride = bits in one row of container
235 */ 88 */
236static s32 sita_free(struct tcm *tcm, struct tcm_area *area) 89static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
90 unsigned long *pos, unsigned long slot_bytes,
91 unsigned long *map, size_t num_bits, size_t slot_stride)
237{ 92{
238 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; 93 int i;
239 94 unsigned long index;
240 spin_lock(&(pvt->lock)); 95 bool area_free;
96 unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
97 unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
98 unsigned long curr_bit = bit_offset;
99
100 /* reset alignment to 1 if we are matching a specific offset */
101 /* adjust alignment - 1 to get to the format expected in bitmaps */
102 a = (offset > 0) ? 0 : a - 1;
103
104 /* FIXME Return error if slots_per_band > stride */
105
106 while (curr_bit < num_bits) {
107 *pos = bitmap_find_next_zero_area(map, num_bits, curr_bit, w,
108 a);
109
110 /* skip forward if we are not at right offset */
111 if (bit_offset > 0 && (*pos % slots_per_band != bit_offset)) {
112 curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
113 continue;
114 }
241 115
242 /* check that this is in fact an existing area */ 116 /* skip forward to next row if we overlap end of row */
243 WARN_ON(pvt->map[area->p0.x][area->p0.y] != area || 117 if ((*pos % slot_stride) + w > slot_stride) {
244 pvt->map[area->p1.x][area->p1.y] != area); 118 curr_bit = ALIGN(*pos, slot_stride) + bit_offset;
119 continue;
120 }
245 121
246 /* Clear the contents of the associated tiles in the map */ 122 /* TODO: Handle overlapping 4K boundaries */
247 fill_area(tcm, area, NULL);
248 123
249 spin_unlock(&(pvt->lock)); 124 /* break out of look if we will go past end of container */
125 if ((*pos + slot_stride * h) > num_bits)
126 break;
250 127
251 return 0; 128 /* generate mask that represents out matching pattern */
252} 129 bitmap_clear(mask, 0, slot_stride);
130 bitmap_set(mask, (*pos % BITS_PER_LONG), w);
253 131
254/** 132 /* assume the area is free until we find an overlap */
255 * Note: In general the cordinates in the scan field area relevant to the can 133 area_free = true;
256 * sweep directions. The scan origin (e.g. top-left corner) will always be
257 * the p0 member of the field. Therfore, for a scan from top-left p0.x <= p1.x
258 * and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y
259 * <= p0.y
260 */
261 134
262/** 135 /* check subsequent rows to see if complete area is free */
263 * Raster scan horizontally right to left from top to bottom to find a place for 136 for (i = 1; i < h; i++) {
264 * a 2D area of given size inside a scan field. 137 index = *pos / BITS_PER_LONG + i * 8;
265 * 138 if (bitmap_intersects(&map[index], mask,
266 * @param w width of desired area 139 (*pos % BITS_PER_LONG) + w)) {
267 * @param h height of desired area 140 area_free = false;
268 * @param align desired area alignment
269 * @param area pointer to the area that will be set to the best position
270 * @param field area to scan (inclusive)
271 *
272 * @return 0 on success, non-0 error value on failure.
273 */
274static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
275 struct tcm_area *field, struct tcm_area *area)
276{
277 s32 x, y;
278 s16 start_x, end_x, start_y, end_y, found_x = -1;
279 struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
280 struct score best = {{0}, {0}, {0}, 0};
281
282 start_x = field->p0.x;
283 end_x = field->p1.x;
284 start_y = field->p0.y;
285 end_y = field->p1.y;
286
287 /* check scan area co-ordinates */
288 if (field->p0.x < field->p1.x ||
289 field->p1.y < field->p0.y)
290 return -EINVAL;
291
292 /* check if allocation would fit in scan area */
293 if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
294 return -ENOSPC;
295
296 /* adjust start_x and end_y, as allocation would not fit beyond */
297 start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */
298 end_y = end_y - h + 1;
299
300 /* check if allocation would still fit in scan area */
301 if (start_x < end_x)
302 return -ENOSPC;
303
304 /* scan field top-to-bottom, right-to-left */
305 for (y = start_y; y <= end_y; y++) {
306 for (x = start_x; x >= end_x; x -= align) {
307 if (is_area_free(map, x, y, w, h)) {
308 found_x = x;
309
310 /* update best candidate */
311 if (update_candidate(tcm, x, y, w, h, field,
312 CR_R2L_T2B, &best))
313 goto done;
314
315 /* change upper x bound */
316 end_x = x + 1;
317 break; 141 break;
318 } else if (map[x][y] && map[x][y]->is2d) {
319 /* step over 2D areas */
320 x = ALIGN(map[x][y]->p0.x - w + 1, align);
321 } 142 }
322 } 143 }
323 144
324 /* break if you find a free area shouldering the scan field */ 145 if (area_free)
325 if (found_x == start_x)
326 break; 146 break;
327 }
328
329 if (!best.a.tcm)
330 return -ENOSPC;
331done:
332 assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
333 return 0;
334}
335
336/**
337 * Raster scan horizontally left to right from top to bottom to find a place for
338 * a 2D area of given size inside a scan field.
339 *
340 * @param w width of desired area
341 * @param h height of desired area
342 * @param align desired area alignment
343 * @param area pointer to the area that will be set to the best position
344 * @param field area to scan (inclusive)
345 *
346 * @return 0 on success, non-0 error value on failure.
347 */
348static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
349 struct tcm_area *field, struct tcm_area *area)
350{
351 s32 x, y;
352 s16 start_x, end_x, start_y, end_y, found_x = -1;
353 struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
354 struct score best = {{0}, {0}, {0}, 0};
355
356 start_x = field->p0.x;
357 end_x = field->p1.x;
358 start_y = field->p0.y;
359 end_y = field->p1.y;
360
361 /* check scan area co-ordinates */
362 if (field->p1.x < field->p0.x ||
363 field->p1.y < field->p0.y)
364 return -EINVAL;
365
366 /* check if allocation would fit in scan area */
367 if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
368 return -ENOSPC;
369
370 start_x = ALIGN(start_x, align);
371
372 /* check if allocation would still fit in scan area */
373 if (w > LEN(end_x, start_x))
374 return -ENOSPC;
375
376 /* adjust end_x and end_y, as allocation would not fit beyond */
377 end_x = end_x - w + 1; /* + 1 to be inclusive */
378 end_y = end_y - h + 1;
379
380 /* scan field top-to-bottom, left-to-right */
381 for (y = start_y; y <= end_y; y++) {
382 for (x = start_x; x <= end_x; x += align) {
383 if (is_area_free(map, x, y, w, h)) {
384 found_x = x;
385
386 /* update best candidate */
387 if (update_candidate(tcm, x, y, w, h, field,
388 CR_L2R_T2B, &best))
389 goto done;
390 /* change upper x bound */
391 end_x = x - 1;
392 147
393 break; 148 /* go forward past this match */
394 } else if (map[x][y] && map[x][y]->is2d) { 149 if (bit_offset > 0)
395 /* step over 2D areas */ 150 curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
396 x = ALIGN_DOWN(map[x][y]->p1.x, align); 151 else
397 } 152 curr_bit = *pos + a + 1;
398 } 153 }
399 154
400 /* break if you find a free area shouldering the scan field */ 155 if (area_free) {
401 if (found_x == start_x) 156 /* set area as in-use. iterate over rows */
402 break; 157 for (i = 0, index = *pos; i < h; i++, index += slot_stride)
158 bitmap_set(map, index, w);
403 } 159 }
404 160
405 if (!best.a.tcm) 161 return (area_free) ? 0 : -ENOMEM;
406 return -ENOSPC;
407done:
408 assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
409 return 0;
410} 162}
411 163
412/** 164static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
413 * Raster scan horizontally right to left from bottom to top to find a place 165 struct tcm_area *area)
414 * for a 1D area of given size inside a scan field.
415 *
416 * @param num_slots size of desired area
417 * @param align desired area alignment
418 * @param area pointer to the area that will be set to the best
419 * position
420 * @param field area to scan (inclusive)
421 *
422 * @return 0 on success, non-0 error value on failure.
423 */
424static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
425 struct tcm_area *field, struct tcm_area *area)
426{ 166{
427 s32 found = 0; 167 unsigned long pos;
428 s16 x, y; 168 int ret;
429 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; 169
430 struct tcm_area *p; 170 spin_lock(&(tcm->lock));
431 171 ret = r2l_b2t_1d(num_slots, &pos, tcm->bitmap, tcm->map_size);
432 /* check scan area co-ordinates */ 172 if (!ret) {
433 if (field->p0.y < field->p1.y) 173 area->p0.x = pos % tcm->width;
434 return -EINVAL; 174 area->p0.y = pos / tcm->width;
435 175 area->p1.x = (pos + num_slots - 1) % tcm->width;
436 /** 176 area->p1.y = (pos + num_slots - 1) / tcm->width;
437 * Currently we only support full width 1D scan field, which makes sense
438 * since 1D slot-ordering spans the full container width.
439 */
440 if (tcm->width != field->p0.x - field->p1.x + 1)
441 return -EINVAL;
442
443 /* check if allocation would fit in scan area */
444 if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
445 return -ENOSPC;
446
447 x = field->p0.x;
448 y = field->p0.y;
449
450 /* find num_slots consecutive free slots to the left */
451 while (found < num_slots) {
452 if (y < 0)
453 return -ENOSPC;
454
455 /* remember bottom-right corner */
456 if (found == 0) {
457 area->p1.x = x;
458 area->p1.y = y;
459 }
460
461 /* skip busy regions */
462 p = pvt->map[x][y];
463 if (p) {
464 /* move to left of 2D areas, top left of 1D */
465 x = p->p0.x;
466 if (!p->is2d)
467 y = p->p0.y;
468
469 /* start over */
470 found = 0;
471 } else {
472 /* count consecutive free slots */
473 found++;
474 if (found == num_slots)
475 break;
476 }
477
478 /* move to the left */
479 if (x == 0)
480 y--;
481 x = (x ? : tcm->width) - 1;
482
483 } 177 }
178 spin_unlock(&(tcm->lock));
484 179
485 /* set top-left corner */ 180 return ret;
486 area->p0.x = x;
487 area->p0.y = y;
488 return 0;
489} 181}
490 182
491/** 183static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align,
492 * Find a place for a 2D area of given size inside a scan field based on its 184 int16_t offset, uint16_t slot_bytes,
493 * alignment needs. 185 struct tcm_area *area)
494 *
495 * @param w width of desired area
496 * @param h height of desired area
497 * @param align desired area alignment
498 * @param area pointer to the area that will be set to the best position
499 *
500 * @return 0 on success, non-0 error value on failure.
501 */
502static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
503 struct tcm_area *area)
504{ 186{
505 s32 ret = 0; 187 unsigned long pos;
506 struct tcm_area field = {0}; 188 int ret;
507 u16 boundary_x, boundary_y; 189
508 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; 190 spin_lock(&(tcm->lock));
509 191 ret = l2r_t2b(w, h, align, offset, &pos, slot_bytes, tcm->bitmap,
510 if (align > 1) { 192 tcm->map_size, tcm->width);
511 /* prefer top-left corner */ 193
512 boundary_x = pvt->div_pt.x - 1; 194 if (!ret) {
513 boundary_y = pvt->div_pt.y - 1; 195 area->p0.x = pos % tcm->width;
514 196 area->p0.y = pos / tcm->width;
515 /* expand width and height if needed */ 197 area->p1.x = area->p0.x + w - 1;
516 if (w > pvt->div_pt.x) 198 area->p1.y = area->p0.y + h - 1;
517 boundary_x = tcm->width - 1;
518 if (h > pvt->div_pt.y)
519 boundary_y = tcm->height - 1;
520
521 assign(&field, 0, 0, boundary_x, boundary_y);
522 ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
523
524 /* scan whole container if failed, but do not scan 2x */
525 if (ret != 0 && (boundary_x != tcm->width - 1 ||
526 boundary_y != tcm->height - 1)) {
527 /* scan the entire container if nothing found */
528 assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
529 ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
530 }
531 } else if (align == 1) {
532 /* prefer top-right corner */
533 boundary_x = pvt->div_pt.x;
534 boundary_y = pvt->div_pt.y - 1;
535
536 /* expand width and height if needed */
537 if (w > (tcm->width - pvt->div_pt.x))
538 boundary_x = 0;
539 if (h > pvt->div_pt.y)
540 boundary_y = tcm->height - 1;
541
542 assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
543 ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
544
545 /* scan whole container if failed, but do not scan 2x */
546 if (ret != 0 && (boundary_x != 0 ||
547 boundary_y != tcm->height - 1)) {
548 /* scan the entire container if nothing found */
549 assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
550 ret = scan_r2l_t2b(tcm, w, h, align, &field,
551 area);
552 }
553 } 199 }
200 spin_unlock(&(tcm->lock));
554 201
555 return ret; 202 return ret;
556} 203}
557 204
558/* check if an entire area is free */ 205static void sita_deinit(struct tcm *tcm)
559static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
560{ 206{
561 u16 x = 0, y = 0; 207 kfree(tcm);
562 for (y = y0; y < y0 + h; y++) {
563 for (x = x0; x < x0 + w; x++) {
564 if (map[x][y])
565 return false;
566 }
567 }
568 return true;
569} 208}
570 209
571/* fills an area with a parent tcm_area */ 210static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
572static void fill_area(struct tcm *tcm, struct tcm_area *area,
573 struct tcm_area *parent)
574{ 211{
575 s32 x, y; 212 unsigned long pos;
576 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; 213 uint16_t w, h;
577 struct tcm_area a, a_;
578
579 /* set area's tcm; otherwise, enumerator considers it invalid */
580 area->tcm = tcm;
581
582 tcm_for_each_slice(a, *area, a_) {
583 for (x = a.p0.x; x <= a.p1.x; ++x)
584 for (y = a.p0.y; y <= a.p1.y; ++y)
585 pvt->map[x][y] = parent;
586 214
215 pos = area->p0.x + area->p0.y * tcm->width;
216 if (area->is2d) {
217 w = area->p1.x - area->p0.x + 1;
218 h = area->p1.y - area->p0.y + 1;
219 } else {
220 w = area->p1.x + area->p1.y * tcm->width - pos + 1;
221 h = 1;
587 } 222 }
223
224 spin_lock(&(tcm->lock));
225 free_slots(pos, w, h, tcm->bitmap, tcm->width);
226 spin_unlock(&(tcm->lock));
227 return 0;
588} 228}
589 229
590/** 230struct tcm *sita_init(u16 width, u16 height)
591 * Compares a candidate area to the current best area, and if it is a better
592 * fit, it updates the best to this one.
593 *
594 * @param x0, y0, w, h top, left, width, height of candidate area
595 * @param field scan field
596 * @param criteria scan criteria
597 * @param best best candidate and its scores
598 *
599 * @return 1 (true) if the candidate area is known to be the final best, so no
600 * more searching should be performed
601 */
602static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
603 struct tcm_area *field, s32 criteria,
604 struct score *best)
605{ 231{
606 struct score me; /* score for area */ 232 struct tcm *tcm;
607 233 size_t map_size = BITS_TO_LONGS(width*height) * sizeof(unsigned long);
608 /*
609 * NOTE: For horizontal bias we always give the first found, because our
610 * scan is horizontal-raster-based and the first candidate will always
611 * have the horizontal bias.
612 */
613 bool first = criteria & CR_BIAS_HORIZONTAL;
614
615 assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
616
617 /* calculate score for current candidate */
618 if (!first) {
619 get_neighbor_stats(tcm, &me.a, &me.n);
620 me.neighs = me.n.edge + me.n.busy;
621 get_nearness_factor(field, &me.a, &me.f);
622 }
623
624 /* the 1st candidate is always the best */
625 if (!best->a.tcm)
626 goto better;
627 234
628 BUG_ON(first); 235 if (width == 0 || height == 0)
236 return NULL;
629 237
630 /* diagonal balance check */ 238 tcm = kzalloc(sizeof(*tcm) + map_size, GFP_KERNEL);
631 if ((criteria & CR_DIAGONAL_BALANCE) && 239 if (!tcm)
632 best->neighs <= me.neighs && 240 goto error;
633 (best->neighs < me.neighs ||
634 /* this implies that neighs and occupied match */
635 best->n.busy < me.n.busy ||
636 (best->n.busy == me.n.busy &&
637 /* check the nearness factor */
638 best->f.x + best->f.y > me.f.x + me.f.y)))
639 goto better;
640 241
641 /* not better, keep going */ 242 /* Updating the pointers to SiTA implementation APIs */
642 return 0; 243 tcm->height = height;
244 tcm->width = width;
245 tcm->reserve_2d = sita_reserve_2d;
246 tcm->reserve_1d = sita_reserve_1d;
247 tcm->free = sita_free;
248 tcm->deinit = sita_deinit;
643 249
644better: 250 spin_lock_init(&tcm->lock);
645 /* save current area as best */ 251 tcm->bitmap = (unsigned long *)(tcm + 1);
646 memcpy(best, &me, sizeof(me)); 252 bitmap_clear(tcm->bitmap, 0, width*height);
647 best->a.tcm = tcm;
648 return first;
649}
650 253
651/** 254 tcm->map_size = width*height;
652 * Calculate the nearness factor of an area in a search field. The nearness
653 * factor is smaller if the area is closer to the search origin.
654 */
655static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
656 struct nearness_factor *nf)
657{
658 /**
659 * Using signed math as field coordinates may be reversed if
660 * search direction is right-to-left or bottom-to-top.
661 */
662 nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
663 (field->p1.x - field->p0.x);
664 nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
665 (field->p1.y - field->p0.y);
666}
667 255
668/* get neighbor statistics */ 256 return tcm;
669static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
670 struct neighbor_stats *stat)
671{
672 s16 x = 0, y = 0;
673 struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
674
675 /* Clearing any exisiting values */
676 memset(stat, 0, sizeof(*stat));
677
678 /* process top & bottom edges */
679 for (x = area->p0.x; x <= area->p1.x; x++) {
680 if (area->p0.y == 0)
681 stat->edge++;
682 else if (pvt->map[x][area->p0.y - 1])
683 stat->busy++;
684
685 if (area->p1.y == tcm->height - 1)
686 stat->edge++;
687 else if (pvt->map[x][area->p1.y + 1])
688 stat->busy++;
689 }
690 257
691 /* process left & right edges */ 258error:
692 for (y = area->p0.y; y <= area->p1.y; ++y) { 259 kfree(tcm);
693 if (area->p0.x == 0) 260 return NULL;
694 stat->edge++;
695 else if (pvt->map[area->p0.x - 1][y])
696 stat->busy++;
697
698 if (area->p1.x == tcm->width - 1)
699 stat->edge++;
700 else if (pvt->map[area->p1.x + 1][y])
701 stat->busy++;
702 }
703} 261}
diff --git a/drivers/gpu/drm/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index a8d5ce47686f..ef7df7d6fc84 100644
--- a/drivers/gpu/drm/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
@@ -61,18 +61,17 @@ struct tcm {
61 61
62 unsigned int y_offset; /* offset to use for y coordinates */ 62 unsigned int y_offset; /* offset to use for y coordinates */
63 63
64 /* 'pvt' structure shall contain any tcm details (attr) along with 64 spinlock_t lock;
65 linked list of allocated areas and mutex for mutually exclusive access 65 unsigned long *bitmap;
66 to the list. It may also contain copies of width and height to notice 66 size_t map_size;
67 any changes to the publicly available width and height fields. */
68 void *pvt;
69 67
70 /* function table */ 68 /* function table */
71 s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align, 69 s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align,
70 int16_t offset, uint16_t slot_bytes,
72 struct tcm_area *area); 71 struct tcm_area *area);
73 s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area); 72 s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
74 s32 (*free) (struct tcm *tcm, struct tcm_area *area); 73 s32 (*free)(struct tcm *tcm, struct tcm_area *area);
75 void (*deinit) (struct tcm *tcm); 74 void (*deinit)(struct tcm *tcm);
76}; 75};
77 76
78/*============================================================================= 77/*=============================================================================
@@ -91,7 +90,7 @@ struct tcm {
91 * 90 *
92 */ 91 */
93 92
94struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr); 93struct tcm *sita_init(u16 width, u16 height);
95 94
96 95
97/** 96/**
@@ -120,6 +119,9 @@ static inline void tcm_deinit(struct tcm *tcm)
120 * all values may be supported by the container manager, 119 * all values may be supported by the container manager,
121 * but it must support 0 (1), 32 and 64. 120 * but it must support 0 (1), 32 and 64.
122 * 0 value is equivalent to 1. 121 * 0 value is equivalent to 1.
122 * @param offset Offset requirement, in bytes. This is the offset
123 * from a 4KiB aligned virtual address.
124 * @param slot_bytes Width of slot in bytes
123 * @param area Pointer to where the reserved area should be stored. 125 * @param area Pointer to where the reserved area should be stored.
124 * 126 *
125 * @return 0 on success. Non-0 error code on failure. Also, 127 * @return 0 on success. Non-0 error code on failure. Also,
@@ -129,7 +131,8 @@ static inline void tcm_deinit(struct tcm *tcm)
129 * allocation. 131 * allocation.
130 */ 132 */
131static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, 133static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
132 u16 align, struct tcm_area *area) 134 u16 align, int16_t offset, uint16_t slot_bytes,
135 struct tcm_area *area)
133{ 136{
134 /* perform rudimentary error checking */ 137 /* perform rudimentary error checking */
135 s32 res = tcm == NULL ? -ENODEV : 138 s32 res = tcm == NULL ? -ENODEV :
@@ -140,7 +143,8 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
140 143
141 if (!res) { 144 if (!res) {
142 area->is2d = true; 145 area->is2d = true;
143 res = tcm->reserve_2d(tcm, height, width, align, area); 146 res = tcm->reserve_2d(tcm, height, width, align, offset,
147 slot_bytes, area);
144 area->tcm = res ? NULL : tcm; 148 area->tcm = res ? NULL : tcm;
145 } 149 }
146 150
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 7d4704b1292b..1500ab99f548 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -31,6 +31,16 @@ config DRM_PANEL_LG_LG4573
31 Say Y here if you want to enable support for LG4573 RGB panel. 31 Say Y here if you want to enable support for LG4573 RGB panel.
32 To compile this driver as a module, choose M here. 32 To compile this driver as a module, choose M here.
33 33
34config DRM_PANEL_PANASONIC_VVX10F034N00
35 tristate "Panasonic VVX10F034N00 1920x1200 video mode panel"
36 depends on OF
37 depends on DRM_MIPI_DSI
38 depends on BACKLIGHT_CLASS_DEVICE
39 help
40 Say Y here if you want to enable support for Panasonic VVX10F034N00
41 WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
42 Xperia Z2 tablets
43
34config DRM_PANEL_SAMSUNG_S6E8AA0 44config DRM_PANEL_SAMSUNG_S6E8AA0
35 tristate "Samsung S6E8AA0 DSI video mode panel" 45 tristate "Samsung S6E8AA0 DSI video mode panel"
36 depends on OF 46 depends on OF
@@ -51,4 +61,13 @@ config DRM_PANEL_SHARP_LQ101R1SX01
51 To compile this driver as a module, choose M here: the module 61 To compile this driver as a module, choose M here: the module
52 will be called panel-sharp-lq101r1sx01. 62 will be called panel-sharp-lq101r1sx01.
53 63
64config DRM_PANEL_SHARP_LS043T1LE01
65 tristate "Sharp LS043T1LE01 qHD video mode panel"
66 depends on OF
67 depends on DRM_MIPI_DSI
68 depends on BACKLIGHT_CLASS_DEVICE
69 help
70 Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
71 (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard
72
54endmenu 73endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d0f016dd7ddb..f277eed933d6 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,7 @@
1obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o 1obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
2obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o 2obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
3obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
3obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o 4obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
4obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o 5obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
5obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o 6obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
7obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
new file mode 100644
index 000000000000..7f915f706fa6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -0,0 +1,334 @@
1/*
2 * Copyright (C) 2015 Red Hat
3 * Copyright (C) 2015 Sony Mobile Communications Inc.
4 * Author: Werner Johansson <werner.johansson@sonymobile.com>
5 *
6 * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/backlight.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/regulator/consumer.h>
25
26#include <drm/drmP.h>
27#include <drm/drm_crtc.h>
28#include <drm/drm_mipi_dsi.h>
29#include <drm/drm_panel.h>
30
31#include <video/mipi_display.h>
32
33/*
34 * When power is turned off to this panel a minimum off time of 500ms has to be
35 * observed before powering back on as there's no external reset pin. Keep
36 * track of earliest wakeup time and delay subsequent prepare call accordingly
37 */
38#define MIN_POFF_MS (500)
39
40struct wuxga_nt_panel {
41 struct drm_panel base;
42 struct mipi_dsi_device *dsi;
43
44 struct backlight_device *backlight;
45 struct regulator *supply;
46
47 bool prepared;
48 bool enabled;
49
50 ktime_t earliest_wake;
51
52 const struct drm_display_mode *mode;
53};
54
55static inline struct wuxga_nt_panel *to_wuxga_nt_panel(struct drm_panel *panel)
56{
57 return container_of(panel, struct wuxga_nt_panel, base);
58}
59
60static int wuxga_nt_panel_on(struct wuxga_nt_panel *wuxga_nt)
61{
62 struct mipi_dsi_device *dsi = wuxga_nt->dsi;
63 int ret;
64
65 ret = mipi_dsi_turn_on_peripheral(dsi);
66 if (ret < 0)
67 return ret;
68
69 return 0;
70}
71
72static int wuxga_nt_panel_disable(struct drm_panel *panel)
73{
74 struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
75
76 if (!wuxga_nt->enabled)
77 return 0;
78
79 mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
80
81 if (wuxga_nt->backlight) {
82 wuxga_nt->backlight->props.power = FB_BLANK_POWERDOWN;
83 wuxga_nt->backlight->props.state |= BL_CORE_FBBLANK;
84 backlight_update_status(wuxga_nt->backlight);
85 }
86
87 wuxga_nt->enabled = false;
88
89 return 0;
90}
91
92static int wuxga_nt_panel_unprepare(struct drm_panel *panel)
93{
94 struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
95
96 if (!wuxga_nt->prepared)
97 return 0;
98
99 regulator_disable(wuxga_nt->supply);
100 wuxga_nt->earliest_wake = ktime_add_ms(ktime_get_real(), MIN_POFF_MS);
101 wuxga_nt->prepared = false;
102
103 return 0;
104}
105
106static int wuxga_nt_panel_prepare(struct drm_panel *panel)
107{
108 struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
109 int ret;
110 s64 enablewait;
111
112 if (wuxga_nt->prepared)
113 return 0;
114
115 /*
116 * If the user re-enabled the panel before the required off-time then
117 * we need to wait the remaining period before re-enabling regulator
118 */
119 enablewait = ktime_ms_delta(wuxga_nt->earliest_wake, ktime_get_real());
120
121 /* Sanity check, this should never happen */
122 if (enablewait > MIN_POFF_MS)
123 enablewait = MIN_POFF_MS;
124
125 if (enablewait > 0)
126 msleep(enablewait);
127
128 ret = regulator_enable(wuxga_nt->supply);
129 if (ret < 0)
130 return ret;
131
132 /*
133 * A minimum delay of 250ms is required after power-up until commands
134 * can be sent
135 */
136 msleep(250);
137
138 ret = wuxga_nt_panel_on(wuxga_nt);
139 if (ret < 0) {
140 dev_err(panel->dev, "failed to set panel on: %d\n", ret);
141 goto poweroff;
142 }
143
144 wuxga_nt->prepared = true;
145
146 return 0;
147
148poweroff:
149 regulator_disable(wuxga_nt->supply);
150
151 return ret;
152}
153
154static int wuxga_nt_panel_enable(struct drm_panel *panel)
155{
156 struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
157
158 if (wuxga_nt->enabled)
159 return 0;
160
161 if (wuxga_nt->backlight) {
162 wuxga_nt->backlight->props.power = FB_BLANK_UNBLANK;
163 wuxga_nt->backlight->props.state &= ~BL_CORE_FBBLANK;
164 backlight_update_status(wuxga_nt->backlight);
165 }
166
167 wuxga_nt->enabled = true;
168
169 return 0;
170}
171
172static const struct drm_display_mode default_mode = {
173 .clock = 164402,
174 .hdisplay = 1920,
175 .hsync_start = 1920 + 152,
176 .hsync_end = 1920 + 152 + 52,
177 .htotal = 1920 + 152 + 52 + 20,
178 .vdisplay = 1200,
179 .vsync_start = 1200 + 24,
180 .vsync_end = 1200 + 24 + 6,
181 .vtotal = 1200 + 24 + 6 + 48,
182 .vrefresh = 60,
183};
184
185static int wuxga_nt_panel_get_modes(struct drm_panel *panel)
186{
187 struct drm_display_mode *mode;
188
189 mode = drm_mode_duplicate(panel->drm, &default_mode);
190 if (!mode) {
191 dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
192 default_mode.hdisplay, default_mode.vdisplay,
193 default_mode.vrefresh);
194 return -ENOMEM;
195 }
196
197 drm_mode_set_name(mode);
198
199 drm_mode_probed_add(panel->connector, mode);
200
201 panel->connector->display_info.width_mm = 217;
202 panel->connector->display_info.height_mm = 136;
203
204 return 1;
205}
206
207static const struct drm_panel_funcs wuxga_nt_panel_funcs = {
208 .disable = wuxga_nt_panel_disable,
209 .unprepare = wuxga_nt_panel_unprepare,
210 .prepare = wuxga_nt_panel_prepare,
211 .enable = wuxga_nt_panel_enable,
212 .get_modes = wuxga_nt_panel_get_modes,
213};
214
215static const struct of_device_id wuxga_nt_of_match[] = {
216 { .compatible = "panasonic,vvx10f034n00", },
217 { }
218};
219MODULE_DEVICE_TABLE(of, wuxga_nt_of_match);
220
221static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
222{
223 struct device *dev = &wuxga_nt->dsi->dev;
224 struct device_node *np;
225 int ret;
226
227 wuxga_nt->mode = &default_mode;
228
229 wuxga_nt->supply = devm_regulator_get(dev, "power");
230 if (IS_ERR(wuxga_nt->supply))
231 return PTR_ERR(wuxga_nt->supply);
232
233 np = of_parse_phandle(dev->of_node, "backlight", 0);
234 if (np) {
235 wuxga_nt->backlight = of_find_backlight_by_node(np);
236 of_node_put(np);
237
238 if (!wuxga_nt->backlight)
239 return -EPROBE_DEFER;
240 }
241
242 drm_panel_init(&wuxga_nt->base);
243 wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
244 wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
245
246 ret = drm_panel_add(&wuxga_nt->base);
247 if (ret < 0)
248 goto put_backlight;
249
250 return 0;
251
252put_backlight:
253 if (wuxga_nt->backlight)
254 put_device(&wuxga_nt->backlight->dev);
255
256 return ret;
257}
258
259static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
260{
261 if (wuxga_nt->base.dev)
262 drm_panel_remove(&wuxga_nt->base);
263
264 if (wuxga_nt->backlight)
265 put_device(&wuxga_nt->backlight->dev);
266}
267
268static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
269{
270 struct wuxga_nt_panel *wuxga_nt;
271 int ret;
272
273 dsi->lanes = 4;
274 dsi->format = MIPI_DSI_FMT_RGB888;
275 dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
276 MIPI_DSI_MODE_VIDEO_HSE |
277 MIPI_DSI_CLOCK_NON_CONTINUOUS |
278 MIPI_DSI_MODE_LPM;
279
280 wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL);
281 if (!wuxga_nt)
282 return -ENOMEM;
283
284 mipi_dsi_set_drvdata(dsi, wuxga_nt);
285
286 wuxga_nt->dsi = dsi;
287
288 ret = wuxga_nt_panel_add(wuxga_nt);
289 if (ret < 0)
290 return ret;
291
292 return mipi_dsi_attach(dsi);
293}
294
295static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
296{
297 struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
298 int ret;
299
300 ret = wuxga_nt_panel_disable(&wuxga_nt->base);
301 if (ret < 0)
302 dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
303
304 ret = mipi_dsi_detach(dsi);
305 if (ret < 0)
306 dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
307
308 drm_panel_detach(&wuxga_nt->base);
309 wuxga_nt_panel_del(wuxga_nt);
310
311 return 0;
312}
313
314static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
315{
316 struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
317
318 wuxga_nt_panel_disable(&wuxga_nt->base);
319}
320
321static struct mipi_dsi_driver wuxga_nt_panel_driver = {
322 .driver = {
323 .name = "panel-panasonic-vvx10f034n00",
324 .of_match_table = wuxga_nt_of_match,
325 },
326 .probe = wuxga_nt_panel_probe,
327 .remove = wuxga_nt_panel_remove,
328 .shutdown = wuxga_nt_panel_shutdown,
329};
330module_mipi_dsi_driver(wuxga_nt_panel_driver);
331
332MODULE_AUTHOR("Werner Johansson <werner.johansson@sonymobile.com>");
333MODULE_DESCRIPTION("Panasonic VVX10F034N00 Novatek NT1397-based WUXGA (1920x1200) video mode panel driver");
334MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
new file mode 100644
index 000000000000..3aeb0bda4947
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -0,0 +1,387 @@
1/*
2 * Copyright (C) 2015 Red Hat
3 * Copyright (C) 2015 Sony Mobile Communications Inc.
4 * Author: Werner Johansson <werner.johansson@sonymobile.com>
5 *
6 * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/backlight.h>
22#include <linux/gpio/consumer.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/regulator/consumer.h>
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include <drm/drm_mipi_dsi.h>
30#include <drm/drm_panel.h>
31
32#include <video/mipi_display.h>
33
34struct sharp_nt_panel {
35 struct drm_panel base;
36 struct mipi_dsi_device *dsi;
37
38 struct backlight_device *backlight;
39 struct regulator *supply;
40 struct gpio_desc *reset_gpio;
41
42 bool prepared;
43 bool enabled;
44
45 const struct drm_display_mode *mode;
46};
47
48static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
49{
50 return container_of(panel, struct sharp_nt_panel, base);
51}
52
53static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
54{
55 struct mipi_dsi_device *dsi = sharp_nt->dsi;
56 int ret;
57
58 dsi->mode_flags |= MIPI_DSI_MODE_LPM;
59
60 ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
61 if (ret < 0)
62 return ret;
63
64 msleep(120);
65
66 /* Novatek two-lane operation */
67 ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
68 if (ret < 0)
69 return ret;
70
71 /* Set both MCU and RGB I/F to 24bpp */
72 ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
73 (MIPI_DCS_PIXEL_FMT_24BIT << 4));
74 if (ret < 0)
75 return ret;
76
77 return 0;
78}
79
80static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
81{
82 struct mipi_dsi_device *dsi = sharp_nt->dsi;
83 int ret;
84
85 dsi->mode_flags |= MIPI_DSI_MODE_LPM;
86
87 ret = mipi_dsi_dcs_set_display_on(dsi);
88 if (ret < 0)
89 return ret;
90
91 return 0;
92}
93
94static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
95{
96 struct mipi_dsi_device *dsi = sharp_nt->dsi;
97 int ret;
98
99 dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
100
101 ret = mipi_dsi_dcs_set_display_off(dsi);
102 if (ret < 0)
103 return ret;
104
105 ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
106 if (ret < 0)
107 return ret;
108
109 return 0;
110}
111
112
113static int sharp_nt_panel_disable(struct drm_panel *panel)
114{
115 struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
116
117 if (!sharp_nt->enabled)
118 return 0;
119
120 if (sharp_nt->backlight) {
121 sharp_nt->backlight->props.power = FB_BLANK_POWERDOWN;
122 backlight_update_status(sharp_nt->backlight);
123 }
124
125 sharp_nt->enabled = false;
126
127 return 0;
128}
129
130static int sharp_nt_panel_unprepare(struct drm_panel *panel)
131{
132 struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
133 int ret;
134
135 if (!sharp_nt->prepared)
136 return 0;
137
138 ret = sharp_nt_panel_off(sharp_nt);
139 if (ret < 0) {
140 dev_err(panel->dev, "failed to set panel off: %d\n", ret);
141 return ret;
142 }
143
144 regulator_disable(sharp_nt->supply);
145 if (sharp_nt->reset_gpio)
146 gpiod_set_value(sharp_nt->reset_gpio, 0);
147
148 sharp_nt->prepared = false;
149
150 return 0;
151}
152
153static int sharp_nt_panel_prepare(struct drm_panel *panel)
154{
155 struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
156 int ret;
157
158 if (sharp_nt->prepared)
159 return 0;
160
161 ret = regulator_enable(sharp_nt->supply);
162 if (ret < 0)
163 return ret;
164
165 msleep(20);
166
167 if (sharp_nt->reset_gpio) {
168 gpiod_set_value(sharp_nt->reset_gpio, 1);
169 msleep(1);
170 gpiod_set_value(sharp_nt->reset_gpio, 0);
171 msleep(1);
172 gpiod_set_value(sharp_nt->reset_gpio, 1);
173 msleep(10);
174 }
175
176 ret = sharp_nt_panel_init(sharp_nt);
177 if (ret < 0) {
178 dev_err(panel->dev, "failed to init panel: %d\n", ret);
179 goto poweroff;
180 }
181
182 ret = sharp_nt_panel_on(sharp_nt);
183 if (ret < 0) {
184 dev_err(panel->dev, "failed to set panel on: %d\n", ret);
185 goto poweroff;
186 }
187
188 sharp_nt->prepared = true;
189
190 return 0;
191
192poweroff:
193 regulator_disable(sharp_nt->supply);
194 if (sharp_nt->reset_gpio)
195 gpiod_set_value(sharp_nt->reset_gpio, 0);
196 return ret;
197}
198
199static int sharp_nt_panel_enable(struct drm_panel *panel)
200{
201 struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
202
203 if (sharp_nt->enabled)
204 return 0;
205
206 if (sharp_nt->backlight) {
207 sharp_nt->backlight->props.power = FB_BLANK_UNBLANK;
208 backlight_update_status(sharp_nt->backlight);
209 }
210
211 sharp_nt->enabled = true;
212
213 return 0;
214}
215
216static const struct drm_display_mode default_mode = {
217 .clock = 41118,
218 .hdisplay = 540,
219 .hsync_start = 540 + 48,
220 .hsync_end = 540 + 48 + 80,
221 .htotal = 540 + 48 + 80 + 32,
222 .vdisplay = 960,
223 .vsync_start = 960 + 3,
224 .vsync_end = 960 + 3 + 15,
225 .vtotal = 960 + 3 + 15 + 1,
226 .vrefresh = 60,
227};
228
229static int sharp_nt_panel_get_modes(struct drm_panel *panel)
230{
231 struct drm_display_mode *mode;
232
233 mode = drm_mode_duplicate(panel->drm, &default_mode);
234 if (!mode) {
235 dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
236 default_mode.hdisplay, default_mode.vdisplay,
237 default_mode.vrefresh);
238 return -ENOMEM;
239 }
240
241 drm_mode_set_name(mode);
242
243 drm_mode_probed_add(panel->connector, mode);
244
245 panel->connector->display_info.width_mm = 54;
246 panel->connector->display_info.height_mm = 95;
247
248 return 1;
249}
250
251static const struct drm_panel_funcs sharp_nt_panel_funcs = {
252 .disable = sharp_nt_panel_disable,
253 .unprepare = sharp_nt_panel_unprepare,
254 .prepare = sharp_nt_panel_prepare,
255 .enable = sharp_nt_panel_enable,
256 .get_modes = sharp_nt_panel_get_modes,
257};
258
259static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
260{
261 struct device *dev = &sharp_nt->dsi->dev;
262 struct device_node *np;
263 int ret;
264
265 sharp_nt->mode = &default_mode;
266
267 sharp_nt->supply = devm_regulator_get(dev, "avdd");
268 if (IS_ERR(sharp_nt->supply))
269 return PTR_ERR(sharp_nt->supply);
270
271 sharp_nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
272 if (IS_ERR(sharp_nt->reset_gpio)) {
273 dev_err(dev, "cannot get reset-gpios %ld\n",
274 PTR_ERR(sharp_nt->reset_gpio));
275 sharp_nt->reset_gpio = NULL;
276 } else {
277 gpiod_set_value(sharp_nt->reset_gpio, 0);
278 }
279
280 np = of_parse_phandle(dev->of_node, "backlight", 0);
281 if (np) {
282 sharp_nt->backlight = of_find_backlight_by_node(np);
283 of_node_put(np);
284
285 if (!sharp_nt->backlight)
286 return -EPROBE_DEFER;
287 }
288
289 drm_panel_init(&sharp_nt->base);
290 sharp_nt->base.funcs = &sharp_nt_panel_funcs;
291 sharp_nt->base.dev = &sharp_nt->dsi->dev;
292
293 ret = drm_panel_add(&sharp_nt->base);
294 if (ret < 0)
295 goto put_backlight;
296
297 return 0;
298
299put_backlight:
300 if (sharp_nt->backlight)
301 put_device(&sharp_nt->backlight->dev);
302
303 return ret;
304}
305
306static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt)
307{
308 if (sharp_nt->base.dev)
309 drm_panel_remove(&sharp_nt->base);
310
311 if (sharp_nt->backlight)
312 put_device(&sharp_nt->backlight->dev);
313}
314
315static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
316{
317 struct sharp_nt_panel *sharp_nt;
318 int ret;
319
320 dsi->lanes = 2;
321 dsi->format = MIPI_DSI_FMT_RGB888;
322 dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
323 MIPI_DSI_MODE_VIDEO_HSE |
324 MIPI_DSI_CLOCK_NON_CONTINUOUS |
325 MIPI_DSI_MODE_EOT_PACKET;
326
327 sharp_nt = devm_kzalloc(&dsi->dev, sizeof(*sharp_nt), GFP_KERNEL);
328 if (!sharp_nt)
329 return -ENOMEM;
330
331 mipi_dsi_set_drvdata(dsi, sharp_nt);
332
333 sharp_nt->dsi = dsi;
334
335 ret = sharp_nt_panel_add(sharp_nt);
336 if (ret < 0)
337 return ret;
338
339 return mipi_dsi_attach(dsi);
340}
341
342static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
343{
344 struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
345 int ret;
346
347 ret = sharp_nt_panel_disable(&sharp_nt->base);
348 if (ret < 0)
349 dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
350
351 ret = mipi_dsi_detach(dsi);
352 if (ret < 0)
353 dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
354
355 drm_panel_detach(&sharp_nt->base);
356 sharp_nt_panel_del(sharp_nt);
357
358 return 0;
359}
360
361static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
362{
363 struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
364
365 sharp_nt_panel_disable(&sharp_nt->base);
366}
367
368static const struct of_device_id sharp_nt_of_match[] = {
369 { .compatible = "sharp,ls043t1le01-qhd", },
370 { }
371};
372MODULE_DEVICE_TABLE(of, sharp_nt_of_match);
373
374static struct mipi_dsi_driver sharp_nt_panel_driver = {
375 .driver = {
376 .name = "panel-sharp-ls043t1le01-qhd",
377 .of_match_table = sharp_nt_of_match,
378 },
379 .probe = sharp_nt_panel_probe,
380 .remove = sharp_nt_panel_remove,
381 .shutdown = sharp_nt_panel_shutdown,
382};
383module_mipi_dsi_driver(sharp_nt_panel_driver);
384
385MODULE_AUTHOR("Werner Johansson <werner.johansson@sonymobile.com>");
386MODULE_DESCRIPTION("Sharp LS043T1LE01 NT35565-based qHD (540x960) video mode panel driver");
387MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f97b73ec4713..f88a631c43ab 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -44,6 +44,10 @@ struct panel_desc {
44 44
45 unsigned int bpc; 45 unsigned int bpc;
46 46
47 /**
48 * @width: width (in millimeters) of the panel's active display area
49 * @height: height (in millimeters) of the panel's active display area
50 */
47 struct { 51 struct {
48 unsigned int width; 52 unsigned int width;
49 unsigned int height; 53 unsigned int height;
@@ -832,6 +836,34 @@ static const struct panel_desc innolux_g121i1_l01 = {
832 }, 836 },
833}; 837};
834 838
839static const struct drm_display_mode innolux_g121x1_l03_mode = {
840 .clock = 65000,
841 .hdisplay = 1024,
842 .hsync_start = 1024 + 0,
843 .hsync_end = 1024 + 1,
844 .htotal = 1024 + 0 + 1 + 320,
845 .vdisplay = 768,
846 .vsync_start = 768 + 38,
847 .vsync_end = 768 + 38 + 1,
848 .vtotal = 768 + 38 + 1 + 0,
849 .vrefresh = 60,
850};
851
852static const struct panel_desc innolux_g121x1_l03 = {
853 .modes = &innolux_g121x1_l03_mode,
854 .num_modes = 1,
855 .bpc = 6,
856 .size = {
857 .width = 246,
858 .height = 185,
859 },
860 .delay = {
861 .enable = 200,
862 .unprepare = 200,
863 .disable = 400,
864 },
865};
866
835static const struct drm_display_mode innolux_n116bge_mode = { 867static const struct drm_display_mode innolux_n116bge_mode = {
836 .clock = 76420, 868 .clock = 76420,
837 .hdisplay = 1366, 869 .hdisplay = 1366,
@@ -902,6 +934,30 @@ static const struct panel_desc innolux_zj070na_01p = {
902 }, 934 },
903}; 935};
904 936
937static const struct display_timing kyo_tcg121xglp_timing = {
938 .pixelclock = { 52000000, 65000000, 71000000 },
939 .hactive = { 1024, 1024, 1024 },
940 .hfront_porch = { 2, 2, 2 },
941 .hback_porch = { 2, 2, 2 },
942 .hsync_len = { 86, 124, 244 },
943 .vactive = { 768, 768, 768 },
944 .vfront_porch = { 2, 2, 2 },
945 .vback_porch = { 2, 2, 2 },
946 .vsync_len = { 6, 34, 73 },
947 .flags = DISPLAY_FLAGS_DE_HIGH,
948};
949
950static const struct panel_desc kyo_tcg121xglp = {
951 .timings = &kyo_tcg121xglp_timing,
952 .num_timings = 1,
953 .bpc = 8,
954 .size = {
955 .width = 246,
956 .height = 184,
957 },
958 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
959};
960
905static const struct drm_display_mode lg_lb070wv8_mode = { 961static const struct drm_display_mode lg_lb070wv8_mode = {
906 .clock = 33246, 962 .clock = 33246,
907 .hdisplay = 800, 963 .hdisplay = 800,
@@ -1027,6 +1083,30 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
1027 .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 1083 .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
1028}; 1084};
1029 1085
1086static const struct drm_display_mode qd43003c0_40_mode = {
1087 .clock = 9000,
1088 .hdisplay = 480,
1089 .hsync_start = 480 + 8,
1090 .hsync_end = 480 + 8 + 4,
1091 .htotal = 480 + 8 + 4 + 39,
1092 .vdisplay = 272,
1093 .vsync_start = 272 + 4,
1094 .vsync_end = 272 + 4 + 10,
1095 .vtotal = 272 + 4 + 10 + 2,
1096 .vrefresh = 60,
1097};
1098
1099static const struct panel_desc qd43003c0_40 = {
1100 .modes = &qd43003c0_40_mode,
1101 .num_modes = 1,
1102 .bpc = 8,
1103 .size = {
1104 .width = 95,
1105 .height = 53,
1106 },
1107 .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
1108};
1109
1030static const struct drm_display_mode samsung_ltn101nt05_mode = { 1110static const struct drm_display_mode samsung_ltn101nt05_mode = {
1031 .clock = 54030, 1111 .clock = 54030,
1032 .hdisplay = 1024, 1112 .hdisplay = 1024,
@@ -1158,6 +1238,9 @@ static const struct of_device_id platform_of_match[] = {
1158 .compatible ="innolux,g121i1-l01", 1238 .compatible ="innolux,g121i1-l01",
1159 .data = &innolux_g121i1_l01 1239 .data = &innolux_g121i1_l01
1160 }, { 1240 }, {
1241 .compatible = "innolux,g121x1-l03",
1242 .data = &innolux_g121x1_l03,
1243 }, {
1161 .compatible = "innolux,n116bge", 1244 .compatible = "innolux,n116bge",
1162 .data = &innolux_n116bge, 1245 .data = &innolux_n116bge,
1163 }, { 1246 }, {
@@ -1167,6 +1250,9 @@ static const struct of_device_id platform_of_match[] = {
1167 .compatible = "innolux,zj070na-01p", 1250 .compatible = "innolux,zj070na-01p",
1168 .data = &innolux_zj070na_01p, 1251 .data = &innolux_zj070na_01p,
1169 }, { 1252 }, {
1253 .compatible = "kyo,tcg121xglp",
1254 .data = &kyo_tcg121xglp,
1255 }, {
1170 .compatible = "lg,lb070wv8", 1256 .compatible = "lg,lb070wv8",
1171 .data = &lg_lb070wv8, 1257 .data = &lg_lb070wv8,
1172 }, { 1258 }, {
@@ -1182,6 +1268,9 @@ static const struct of_device_id platform_of_match[] = {
1182 .compatible = "ortustech,com43h4m85ulc", 1268 .compatible = "ortustech,com43h4m85ulc",
1183 .data = &ortustech_com43h4m85ulc, 1269 .data = &ortustech_com43h4m85ulc,
1184 }, { 1270 }, {
1271 .compatible = "qiaodian,qd43003c0-40",
1272 .data = &qd43003c0_40,
1273 }, {
1185 .compatible = "samsung,ltn101nt05", 1274 .compatible = "samsung,ltn101nt05",
1186 .data = &samsung_ltn101nt05, 1275 .data = &samsung_ltn101nt05,
1187 }, { 1276 }, {
@@ -1263,6 +1352,36 @@ static const struct panel_desc_dsi auo_b080uan01 = {
1263 .lanes = 4, 1352 .lanes = 4,
1264}; 1353};
1265 1354
1355static const struct drm_display_mode boe_tv080wum_nl0_mode = {
1356 .clock = 160000,
1357 .hdisplay = 1200,
1358 .hsync_start = 1200 + 120,
1359 .hsync_end = 1200 + 120 + 20,
1360 .htotal = 1200 + 120 + 20 + 21,
1361 .vdisplay = 1920,
1362 .vsync_start = 1920 + 21,
1363 .vsync_end = 1920 + 21 + 3,
1364 .vtotal = 1920 + 21 + 3 + 18,
1365 .vrefresh = 60,
1366 .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
1367};
1368
1369static const struct panel_desc_dsi boe_tv080wum_nl0 = {
1370 .desc = {
1371 .modes = &boe_tv080wum_nl0_mode,
1372 .num_modes = 1,
1373 .size = {
1374 .width = 107,
1375 .height = 172,
1376 },
1377 },
1378 .flags = MIPI_DSI_MODE_VIDEO |
1379 MIPI_DSI_MODE_VIDEO_BURST |
1380 MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
1381 .format = MIPI_DSI_FMT_RGB888,
1382 .lanes = 4,
1383};
1384
1266static const struct drm_display_mode lg_ld070wx3_sl01_mode = { 1385static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
1267 .clock = 71000, 1386 .clock = 71000,
1268 .hdisplay = 800, 1387 .hdisplay = 800,
@@ -1348,11 +1467,15 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
1348 .lanes = 4, 1467 .lanes = 4,
1349}; 1468};
1350 1469
1470
1351static const struct of_device_id dsi_of_match[] = { 1471static const struct of_device_id dsi_of_match[] = {
1352 { 1472 {
1353 .compatible = "auo,b080uan01", 1473 .compatible = "auo,b080uan01",
1354 .data = &auo_b080uan01 1474 .data = &auo_b080uan01
1355 }, { 1475 }, {
1476 .compatible = "boe,tv080wum-nl0",
1477 .data = &boe_tv080wum_nl0
1478 }, {
1356 .compatible = "lg,ld070wx3-sl01", 1479 .compatible = "lg,ld070wx3-sl01",
1357 .data = &lg_ld070wx3_sl01 1480 .data = &lg_ld070wx3_sl01
1358 }, { 1481 }, {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index cddba079197f..86276519b2ef 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -876,16 +876,6 @@ static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
876 .best_encoder = qxl_best_encoder, 876 .best_encoder = qxl_best_encoder,
877}; 877};
878 878
879static void qxl_conn_save(struct drm_connector *connector)
880{
881 DRM_DEBUG("\n");
882}
883
884static void qxl_conn_restore(struct drm_connector *connector)
885{
886 DRM_DEBUG("\n");
887}
888
889static enum drm_connector_status qxl_conn_detect( 879static enum drm_connector_status qxl_conn_detect(
890 struct drm_connector *connector, 880 struct drm_connector *connector,
891 bool force) 881 bool force)
@@ -932,10 +922,8 @@ static void qxl_conn_destroy(struct drm_connector *connector)
932 922
933static const struct drm_connector_funcs qxl_connector_funcs = { 923static const struct drm_connector_funcs qxl_connector_funcs = {
934 .dpms = drm_helper_connector_dpms, 924 .dpms = drm_helper_connector_dpms,
935 .save = qxl_conn_save,
936 .restore = qxl_conn_restore,
937 .detect = qxl_conn_detect, 925 .detect = qxl_conn_detect,
938 .fill_modes = drm_helper_probe_single_connector_modes_nomerge, 926 .fill_modes = drm_helper_probe_single_connector_modes,
939 .set_property = qxl_conn_set_property, 927 .set_property = qxl_conn_set_property,
940 .destroy = qxl_conn_destroy, 928 .destroy = qxl_conn_destroy,
941}; 929};
@@ -980,7 +968,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
980 &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); 968 &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
981 969
982 drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs, 970 drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
983 DRM_MODE_ENCODER_VIRTUAL); 971 DRM_MODE_ENCODER_VIRTUAL, NULL);
984 972
985 /* we get HPD via client monitors config */ 973 /* we get HPD via client monitors config */
986 connector->polled = DRM_CONNECTOR_POLL_HPD; 974 connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b28370e014c6..5e1d7899dd72 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -32,7 +32,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
32 struct qxl_bo *bo; 32 struct qxl_bo *bo;
33 struct qxl_device *qdev; 33 struct qxl_device *qdev;
34 34
35 bo = container_of(tbo, struct qxl_bo, tbo); 35 bo = to_qxl_bo(tbo);
36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
37 37
38 qxl_surface_evict(qdev, bo, false); 38 qxl_surface_evict(qdev, bo, false);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0cbc4c987164..953412766416 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -201,7 +201,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
201 placement->num_busy_placement = 1; 201 placement->num_busy_placement = 1;
202 return; 202 return;
203 } 203 }
204 qbo = container_of(bo, struct qxl_bo, tbo); 204 qbo = to_qxl_bo(bo);
205 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); 205 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
206 *placement = qbo->placement; 206 *placement = qbo->placement;
207} 207}
@@ -365,7 +365,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
365 365
366 if (!qxl_ttm_bo_is_qxl_bo(bo)) 366 if (!qxl_ttm_bo_is_qxl_bo(bo))
367 return; 367 return;
368 qbo = container_of(bo, struct qxl_bo, tbo); 368 qbo = to_qxl_bo(bo);
369 qdev = qbo->gem_base.dev->dev_private; 369 qdev = qbo->gem_base.dev->dev_private;
370 370
371 if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id) 371 if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 421ae130809b..9909f5c68d76 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -5,12 +5,3 @@ config DRM_RADEON_USERPTR
5 help 5 help
6 This option selects CONFIG_MMU_NOTIFIER if it isn't already 6 This option selects CONFIG_MMU_NOTIFIER if it isn't already
7 selected to enabled full userptr support. 7 selected to enabled full userptr support.
8
9config DRM_RADEON_UMS
10 bool "Enable userspace modesetting on radeon (DEPRECATED)"
11 depends on DRM_RADEON
12 help
13 Choose this option if you still need userspace modesetting.
14
15 Userspace modesetting is deprecated for quite some time now, so
16 enable this only if you have ancient versions of the DDX drivers.
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index dea53e36a2ef..08bd17d3925c 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -58,10 +58,6 @@ $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
58 58
59radeon-y := radeon_drv.o 59radeon-y := radeon_drv.o
60 60
61# add UMS driver
62radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
63 radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o drm_buffer.o
64
65# add KMS driver 61# add KMS driver
66radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ 62radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
67 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ 63 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad24b31..801dd60ac192 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -25,6 +25,7 @@
25 */ 25 */
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/drm_fb_helper.h>
28#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
29#include <drm/drm_fixed.h> 30#include <drm/drm_fixed.h>
30#include "radeon.h" 31#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index bd73b4069069..44ee72e04df9 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -302,77 +302,31 @@ static int convert_bpc_to_bpp(int bpc)
302 return bpc * 3; 302 return bpc * 3;
303} 303}
304 304
305/* get the max pix clock supported by the link rate and lane num */
306static int dp_get_max_dp_pix_clock(int link_rate,
307 int lane_num,
308 int bpp)
309{
310 return (link_rate * lane_num * 8) / bpp;
311}
312
313/***** radeon specific DP functions *****/ 305/***** radeon specific DP functions *****/
314 306
315int radeon_dp_get_max_link_rate(struct drm_connector *connector, 307int radeon_dp_get_dp_link_config(struct drm_connector *connector,
316 const u8 dpcd[DP_DPCD_SIZE]) 308 const u8 dpcd[DP_DPCD_SIZE],
317{ 309 unsigned pix_clock,
318 int max_link_rate; 310 unsigned *dp_lanes, unsigned *dp_rate)
319
320 if (radeon_connector_is_dp12_capable(connector))
321 max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
322 else
323 max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
324
325 return max_link_rate;
326}
327
328/* First get the min lane# when low rate is used according to pixel clock
329 * (prefer low rate), second check max lane# supported by DP panel,
330 * if the max lane# < low rate lane# then use max lane# instead.
331 */
332static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
333 const u8 dpcd[DP_DPCD_SIZE],
334 int pix_clock)
335{
336 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
337 int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
338 int max_lane_num = drm_dp_max_lane_count(dpcd);
339 int lane_num;
340 int max_dp_pix_clock;
341
342 for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
343 max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
344 if (pix_clock <= max_dp_pix_clock)
345 break;
346 }
347
348 return lane_num;
349}
350
351static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
352 const u8 dpcd[DP_DPCD_SIZE],
353 int pix_clock)
354{ 311{
355 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); 312 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
356 int lane_num, max_pix_clock; 313 static const unsigned link_rates[3] = { 162000, 270000, 540000 };
357 314 unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
358 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == 315 unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
359 ENCODER_OBJECT_ID_NUTMEG) 316 unsigned lane_num, i, max_pix_clock;
360 return 270000; 317
361 318 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
362 lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); 319 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
363 max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp); 320 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
364 if (pix_clock <= max_pix_clock) 321 if (max_pix_clock >= pix_clock) {
365 return 162000; 322 *dp_lanes = lane_num;
366 max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp); 323 *dp_rate = link_rates[i];
367 if (pix_clock <= max_pix_clock) 324 return 0;
368 return 270000; 325 }
369 if (radeon_connector_is_dp12_capable(connector)) { 326 }
370 max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
371 if (pix_clock <= max_pix_clock)
372 return 540000;
373 } 327 }
374 328
375 return radeon_dp_get_max_link_rate(connector, dpcd); 329 return -EINVAL;
376} 330}
377 331
378static u8 radeon_dp_encoder_service(struct radeon_device *rdev, 332static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -491,6 +445,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
491{ 445{
492 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 446 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
493 struct radeon_connector_atom_dig *dig_connector; 447 struct radeon_connector_atom_dig *dig_connector;
448 int ret;
494 449
495 if (!radeon_connector->con_priv) 450 if (!radeon_connector->con_priv)
496 return; 451 return;
@@ -498,10 +453,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
498 453
499 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 454 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
500 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { 455 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
501 dig_connector->dp_clock = 456 ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
502 radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); 457 mode->clock,
503 dig_connector->dp_lane_count = 458 &dig_connector->dp_lane_count,
504 radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); 459 &dig_connector->dp_clock);
460 if (ret) {
461 dig_connector->dp_clock = 0;
462 dig_connector->dp_lane_count = 0;
463 }
505 } 464 }
506} 465}
507 466
@@ -510,7 +469,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
510{ 469{
511 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 470 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
512 struct radeon_connector_atom_dig *dig_connector; 471 struct radeon_connector_atom_dig *dig_connector;
513 int dp_clock; 472 unsigned dp_clock, dp_lanes;
473 int ret;
514 474
515 if ((mode->clock > 340000) && 475 if ((mode->clock > 340000) &&
516 (!radeon_connector_is_dp12_capable(connector))) 476 (!radeon_connector_is_dp12_capable(connector)))
@@ -520,8 +480,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
520 return MODE_CLOCK_HIGH; 480 return MODE_CLOCK_HIGH;
521 dig_connector = radeon_connector->con_priv; 481 dig_connector = radeon_connector->con_priv;
522 482
523 dp_clock = 483 ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
524 radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); 484 mode->clock,
485 &dp_lanes,
486 &dp_clock);
487 if (ret)
488 return MODE_CLOCK_HIGH;
525 489
526 if ((dp_clock == 540000) && 490 if ((dp_clock == 540000) &&
527 (!radeon_connector_is_dp12_capable(connector))) 491 (!radeon_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index bb292143997e..01b20e14a247 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2767,23 +2767,27 @@ radeon_add_atom_encoder(struct drm_device *dev,
2767 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 2767 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
2768 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 2768 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2769 radeon_encoder->rmx_type = RMX_FULL; 2769 radeon_encoder->rmx_type = RMX_FULL;
2770 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 2770 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2771 DRM_MODE_ENCODER_LVDS, NULL);
2771 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); 2772 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
2772 } else { 2773 } else {
2773 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2774 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2775 DRM_MODE_ENCODER_TMDS, NULL);
2774 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2776 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
2775 } 2777 }
2776 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2778 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
2777 break; 2779 break;
2778 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 2780 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
2779 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); 2781 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2782 DRM_MODE_ENCODER_DAC, NULL);
2780 radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); 2783 radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
2781 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); 2784 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
2782 break; 2785 break;
2783 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 2786 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
2784 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 2787 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
2785 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 2788 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2786 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); 2789 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2790 DRM_MODE_ENCODER_TVDAC, NULL);
2787 radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); 2791 radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
2788 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); 2792 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
2789 break; 2793 break;
@@ -2797,13 +2801,16 @@ radeon_add_atom_encoder(struct drm_device *dev,
2797 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2801 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2798 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 2802 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2799 radeon_encoder->rmx_type = RMX_FULL; 2803 radeon_encoder->rmx_type = RMX_FULL;
2800 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 2804 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2805 DRM_MODE_ENCODER_LVDS, NULL);
2801 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); 2806 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
2802 } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 2807 } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2803 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); 2808 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2809 DRM_MODE_ENCODER_DAC, NULL);
2804 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2810 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
2805 } else { 2811 } else {
2806 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2812 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2813 DRM_MODE_ENCODER_TMDS, NULL);
2807 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2814 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
2808 } 2815 }
2809 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2816 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
@@ -2820,11 +2827,14 @@ radeon_add_atom_encoder(struct drm_device *dev,
2820 /* these are handled by the primary encoders */ 2827 /* these are handled by the primary encoders */
2821 radeon_encoder->is_ext_encoder = true; 2828 radeon_encoder->is_ext_encoder = true;
2822 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 2829 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2823 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 2830 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2831 DRM_MODE_ENCODER_LVDS, NULL);
2824 else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 2832 else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
2825 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); 2833 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2834 DRM_MODE_ENCODER_DAC, NULL);
2826 else 2835 else
2827 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2836 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
2837 DRM_MODE_ENCODER_TMDS, NULL);
2828 drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); 2838 drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
2829 break; 2839 break;
2830 } 2840 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0154db43860c..4c30d8c65558 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4132,10 +4132,10 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
4132 * @rdev: radeon_device pointer 4132 * @rdev: radeon_device pointer
4133 * @ib: radeon indirect buffer object 4133 * @ib: radeon indirect buffer object
4134 * 4134 *
4135 * Emits an DE (drawing engine) or CE (constant engine) IB 4135 * Emits a DE (drawing engine) or CE (constant engine) IB
4136 * on the gfx ring. IBs are usually generated by userspace 4136 * on the gfx ring. IBs are usually generated by userspace
4137 * acceleration drivers and submitted to the kernel for 4137 * acceleration drivers and submitted to the kernel for
4138 * sheduling on the ring. This function schedules the IB 4138 * scheduling on the ring. This function schedules the IB
4139 * on the gfx ring for execution by the GPU. 4139 * on the gfx ring for execution by the GPU.
4140 */ 4140 */
4141void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 4141void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -4173,11 +4173,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
4173 control |= ib->length_dw | (vm_id << 24); 4173 control |= ib->length_dw | (vm_id << 24);
4174 4174
4175 radeon_ring_write(ring, header); 4175 radeon_ring_write(ring, header);
4176 radeon_ring_write(ring, 4176 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
4177#ifdef __BIG_ENDIAN
4178 (2 << 0) |
4179#endif
4180 (ib->gpu_addr & 0xFFFFFFFC));
4181 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 4177 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
4182 radeon_ring_write(ring, control); 4178 radeon_ring_write(ring, control);
4183} 4179}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 752072771388..6bfc46369db1 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -301,6 +301,22 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
301 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 301 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
302 */ 302 */
303 if (ASIC_IS_DCE8(rdev)) { 303 if (ASIC_IS_DCE8(rdev)) {
304 unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
305 DENTIST_DPREFCLK_WDIVIDER_MASK) >>
306 DENTIST_DPREFCLK_WDIVIDER_SHIFT;
307
308 if (div < 128 && div >= 96)
309 div -= 64;
310 else if (div >= 64)
311 div = div / 2 - 16;
312 else if (div >= 8)
313 div /= 4;
314 else
315 div = 0;
316
317 if (div)
318 clock = rdev->clock.gpupll_outputfreq * 10 / div;
319
304 WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); 320 WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
305 WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); 321 WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
306 } else { 322 } else {
diff --git a/drivers/gpu/drm/radeon/drm_buffer.c b/drivers/gpu/drm/radeon/drm_buffer.c
deleted file mode 100644
index f4e0f3a3d7b1..000000000000
--- a/drivers/gpu/drm/radeon/drm_buffer.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright 2010 Pauli Nieminen.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Multipart buffer for coping data which is larger than the page size.
30 *
31 * Authors:
32 * Pauli Nieminen <suokkos-at-gmail-dot-com>
33 */
34
35#include <linux/export.h>
36#include "drm_buffer.h"
37
38/**
39 * Allocate the drm buffer object.
40 *
41 * buf: Pointer to a pointer where the object is stored.
42 * size: The number of bytes to allocate.
43 */
44int drm_buffer_alloc(struct drm_buffer **buf, int size)
45{
46 int nr_pages = size / PAGE_SIZE + 1;
47 int idx;
48
49 /* Allocating pointer table to end of structure makes drm_buffer
50 * variable sized */
51 *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
52 GFP_KERNEL);
53
54 if (*buf == NULL) {
55 DRM_ERROR("Failed to allocate drm buffer object to hold"
56 " %d bytes in %d pages.\n",
57 size, nr_pages);
58 return -ENOMEM;
59 }
60
61 (*buf)->size = size;
62
63 for (idx = 0; idx < nr_pages; ++idx) {
64
65 (*buf)->data[idx] =
66 kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
67 GFP_KERNEL);
68
69
70 if ((*buf)->data[idx] == NULL) {
71 DRM_ERROR("Failed to allocate %dth page for drm"
72 " buffer with %d bytes and %d pages.\n",
73 idx + 1, size, nr_pages);
74 goto error_out;
75 }
76
77 }
78
79 return 0;
80
81error_out:
82
83 for (; idx >= 0; --idx)
84 kfree((*buf)->data[idx]);
85
86 kfree(*buf);
87 return -ENOMEM;
88}
89
90/**
91 * Copy the user data to the begin of the buffer and reset the processing
92 * iterator.
93 *
94 * user_data: A pointer the data that is copied to the buffer.
95 * size: The Number of bytes to copy.
96 */
97int drm_buffer_copy_from_user(struct drm_buffer *buf,
98 void __user *user_data, int size)
99{
100 int nr_pages = size / PAGE_SIZE + 1;
101 int idx;
102
103 if (size > buf->size) {
104 DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
105 " %d bytes space\n",
106 size, buf->size);
107 return -EFAULT;
108 }
109
110 for (idx = 0; idx < nr_pages; ++idx) {
111
112 if (copy_from_user(buf->data[idx],
113 user_data + idx * PAGE_SIZE,
114 min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
115 DRM_ERROR("Failed to copy user data (%p) to drm buffer"
116 " (%p) %dth page.\n",
117 user_data, buf, idx);
118 return -EFAULT;
119
120 }
121 }
122 buf->iterator = 0;
123 return 0;
124}
125
126/**
127 * Free the drm buffer object
128 */
129void drm_buffer_free(struct drm_buffer *buf)
130{
131
132 if (buf != NULL) {
133
134 int nr_pages = buf->size / PAGE_SIZE + 1;
135 int idx;
136 for (idx = 0; idx < nr_pages; ++idx)
137 kfree(buf->data[idx]);
138
139 kfree(buf);
140 }
141}
142
143/**
144 * Read an object from buffer that may be split to multiple parts. If object
145 * is not split function just returns the pointer to object in buffer. But in
146 * case of split object data is copied to given stack object that is suplied
147 * by caller.
148 *
149 * The processing location of the buffer is also advanced to the next byte
150 * after the object.
151 *
152 * objsize: The size of the objet in bytes.
153 * stack_obj: A pointer to a memory location where object can be copied.
154 */
155void *drm_buffer_read_object(struct drm_buffer *buf,
156 int objsize, void *stack_obj)
157{
158 int idx = drm_buffer_index(buf);
159 int page = drm_buffer_page(buf);
160 void *obj = NULL;
161
162 if (idx + objsize <= PAGE_SIZE) {
163 obj = &buf->data[page][idx];
164 } else {
165 /* The object is split which forces copy to temporary object.*/
166 int beginsz = PAGE_SIZE - idx;
167 memcpy(stack_obj, &buf->data[page][idx], beginsz);
168
169 memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
170 objsize - beginsz);
171
172 obj = stack_obj;
173 }
174
175 drm_buffer_advance(buf, objsize);
176 return obj;
177}
diff --git a/drivers/gpu/drm/radeon/drm_buffer.h b/drivers/gpu/drm/radeon/drm_buffer.h
deleted file mode 100644
index c80d3a340b94..000000000000
--- a/drivers/gpu/drm/radeon/drm_buffer.h
+++ /dev/null
@@ -1,148 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright 2010 Pauli Nieminen.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Multipart buffer for copying data which is larger than the page size.
30 *
31 * Authors:
32 * Pauli Nieminen <suokkos-at-gmail-dot-com>
33 */
34
35#ifndef _DRM_BUFFER_H_
36#define _DRM_BUFFER_H_
37
38#include <drm/drmP.h>
39
/*
 * Multipart buffer: the payload is stored as a flexible array of
 * page-sized chunks, with a byte iterator tracking the current
 * processing position (see drm_buffer_page()/drm_buffer_index()).
 */
struct drm_buffer {
	int iterator;	/* current processing position, in bytes from start */
	int size;	/* total payload size in bytes */
	char *data[];	/* flexible array of page-sized chunk pointers */
};
45
46
47/**
48 * Return the index of page that buffer is currently pointing at.
49 */
50static inline int drm_buffer_page(struct drm_buffer *buf)
51{
52 return buf->iterator / PAGE_SIZE;
53}
54/**
55 * Return the index of the current byte in the page
56 */
57static inline int drm_buffer_index(struct drm_buffer *buf)
58{
59 return buf->iterator & (PAGE_SIZE - 1);
60}
61/**
62 * Return number of bytes that is left to process
63 */
64static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
65{
66 return buf->size - buf->iterator;
67}
68
69/**
70 * Advance the buffer iterator number of bytes that is given.
71 */
72static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
73{
74 buf->iterator += bytes;
75}
76
77/**
78 * Allocate the drm buffer object.
79 *
80 * buf: A pointer to a pointer where the object is stored.
81 * size: The number of bytes to allocate.
82 */
83extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
84
85/**
86 * Copy the user data to the beginning of the buffer and reset the
87 * processing iterator.
88 *
89 * user_data: A pointer the data that is copied to the buffer.
90 * size: The number of bytes to copy.
91 */
92extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
93 void __user *user_data, int size);
94
95/**
96 * Free the drm buffer object
97 */
98extern void drm_buffer_free(struct drm_buffer *buf);
99
100/**
101 * Read an object from the buffer; the object may be split across multiple
102 * pages. If the object is not split, the function simply returns a pointer
103 * to the object inside the buffer. If it is split, the object data is
104 * copied to the stack object supplied by the caller.
105 *
106 * The processing location of the buffer is also advanced to the next byte
107 * after the object.
108 *
109 * objsize: The size of the object in bytes.
110 * stack_obj: A pointer to a memory location where object can be copied.
111 */
112extern void *drm_buffer_read_object(struct drm_buffer *buf,
113 int objsize, void *stack_obj);
114
115/**
116 * Returns the pointer to the dword which is offset number of elements from the
117 * current processing location.
118 *
119 * Caller must make sure that dword is not split in the buffer. This
120 * requirement is easily met if all the sizes of objects in buffer are
121 * multiples of dword and PAGE_SIZE is multiple dword.
122 *
123 * Call to this function doesn't change the processing location.
124 *
125 * offset: The index of the dword relative to the internat iterator.
126 */
127static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
128 int offset)
129{
130 int iter = buffer->iterator + offset * 4;
131 return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
132}
133/**
134 * Returns the pointer to the dword which is offset number of elements from
135 * the current processing location.
136 *
137 * Call to this function doesn't change the processing location.
138 *
139 * offset: The index of the byte relative to the internat iterator.
140 */
141static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
142 int offset)
143{
144 int iter = buffer->iterator + offset;
145 return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
146}
147
148#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9e7e2bf03b81..5eae0a88dd3e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3150,7 +3150,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3150{ 3150{
3151 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3151 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3152 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3152 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3153 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 3153 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
3154 fixed20_12 crit_point_ff = {0};
3154 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3155 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3155 fixed20_12 memtcas_ff[8] = { 3156 fixed20_12 memtcas_ff[8] = {
3156 dfixed_init(1), 3157 dfixed_init(1),
@@ -3204,7 +3205,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3204 fixed20_12 min_mem_eff; 3205 fixed20_12 min_mem_eff;
3205 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 3206 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3206 fixed20_12 cur_latency_mclk, cur_latency_sclk; 3207 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3207 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, 3208 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0},
3208 disp_drain_rate2, read_return_rate; 3209 disp_drain_rate2, read_return_rate;
3209 fixed20_12 time_disp1_drop_priority; 3210 fixed20_12 time_disp1_drop_priority;
3210 int c; 3211 int c;
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
deleted file mode 100644
index 9418e388b045..000000000000
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ /dev/null
@@ -1,1186 +0,0 @@
1/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002.
4 * Copyright (C) 2004 Nicolai Haehnle.
5 * All Rights Reserved.
6 *
7 * The Weather Channel (TM) funded Tungsten Graphics to develop the
8 * initial release of the Radeon 8500 driver under the XFree86 license.
9 * This notice must be preserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 *
30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net>
32 *
33 * ------------------------ This file is DEPRECATED! -------------------------
34 */
35
36#include <drm/drmP.h>
37#include <drm/radeon_drm.h>
38#include "radeon_drv.h"
39#include "r300_reg.h"
40#include "drm_buffer.h"
41
42#include <asm/unaligned.h>
43
/* Maximum number of cliprects the hardware applies in a single emit. */
#define R300_SIMULTANEOUS_CLIPRECTS		4

/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
 */
static const int r300_cliprect_cntl[4] = {
	0xAAAA,
	0xEEEE,
	0xFEFE,
	0xFFFE
};
54
/**
 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
 * buffer, starting with index n.
 *
 * Each cliprect is copied from userspace, converted to the inclusive
 * bottom-right convention the hardware expects, masked (and, pre-RV515,
 * biased by R300_CLIPRECT_OFFSET), and programmed into R300_RE_CLIPRECT_*.
 * With zero cliprects the engine is configured so that rendering cannot
 * produce any fragments (see the comment in the else branch).  Always
 * finishes with a destination-cache flush and a 3D idle-clean wait.
 *
 * Returns 0 on success or -EFAULT if a cliprect cannot be copied from
 * userspace.
 */
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
	struct drm_clip_rect box;
	int nr;
	int i;
	RING_LOCALS;

	/* Clamp to what the hardware takes in one go. */
	nr = cmdbuf->nbox - n;
	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
		nr = R300_SIMULTANEOUS_CLIPRECTS;

	DRM_DEBUG("%i cliprects\n", nr);

	if (nr) {
		BEGIN_RING(6 + nr * 2);
		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));

		for (i = 0; i < nr; ++i) {
			/* NOTE(review): bailing out here leaves the ring
			 * packet started by BEGIN_RING() incomplete. */
			if (copy_from_user
			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
				DRM_ERROR("copy cliprect faulted\n");
				return -EFAULT;
			}

			box.x2--;	/* Hardware expects inclusive bottom-right corner */
			box.y2--;

			/* RV515 and newer take raw coordinates; older chips
			 * need the R300_CLIPRECT_OFFSET bias applied first. */
			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
				box.x1 = (box.x1) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2) &
					R300_CLIPRECT_MASK;
			} else {
				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
			}

			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
				 (box.y2 << R300_CLIPRECT_Y_SHIFT));

		}

		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);

		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
		 * client might be able to trample over memory.
		 * The impact should be very limited, but I'd rather be safe than
		 * sorry.
		 */
		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
		OUT_RING(0);
		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
		ADVANCE_RING();
	} else {
		/* Why we allow zero cliprect rendering:
		 * There are some commands in a command buffer that must be submitted
		 * even when there are no cliprects, e.g. DMA buffer discard
		 * or state setting (though state setting could be avoided by
		 * simulating a loss of context).
		 *
		 * Now since the cmdbuf interface is so chaotic right now (and is
		 * bound to remain that way for a bit until things settle down),
		 * it is basically impossible to filter out the commands that are
		 * necessary and those that aren't.
		 *
		 * So I choose the safe way and don't do any filtering at all;
		 * instead, I simply set up the engine so that all rendering
		 * can't produce any fragments.
		 */
		BEGIN_RING(2);
		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
		ADVANCE_RING();
	}

	/* flush cache and wait idle clean after cliprect change */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	ADVANCE_RING();
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	return 0;
}
160
/* Per-register safety flags, indexed by register offset / 4. */
static u8 r300_reg_flags[0x10000 >> 2];

/**
 * Build the r300_reg_flags table: mark which registers userspace may write
 * freely (MARK_SAFE) and which carry GPU offsets that must be range-checked
 * before emission (MARK_CHECK_OFFSET).  Any register left unmarked is
 * rejected by the packet0 checkers.
 */
void r300_init_reg_flags(struct drm_device *dev)
{
	int i;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
			r300_reg_flags[i]|=(mark);

#define MARK_SAFE		1
#define MARK_CHECK_OFFSET	2

#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)

	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
	ADD_RANGE(R300_VAP_CNTL, 1);
	ADD_RANGE(R300_SE_VTE_CNTL, 2);
	ADD_RANGE(0x2134, 2);
	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
	ADD_RANGE(0x21DC, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
	ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
	ADD_RANGE(R300_GB_ENABLE, 1);
	ADD_RANGE(R300_GB_MSPOS0, 5);
	ADD_RANGE(R300_TX_INVALTAGS, 1);
	ADD_RANGE(R300_TX_ENABLE, 1);
	ADD_RANGE(0x4200, 4);
	ADD_RANGE(0x4214, 1);
	ADD_RANGE(R300_RE_POINTSIZE, 1);
	ADD_RANGE(0x4230, 3);
	ADD_RANGE(R300_RE_LINE_CNT, 1);
	ADD_RANGE(R300_RE_UNK4238, 1);
	ADD_RANGE(0x4260, 3);
	ADD_RANGE(R300_RE_SHADE, 4);
	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
	ADD_RANGE(R300_RE_CULL_CNTL, 1);
	ADD_RANGE(0x42C0, 2);
	ADD_RANGE(R300_RS_CNTL_0, 2);

	ADD_RANGE(R300_SU_REG_DEST, 1);
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
		ADD_RANGE(RV530_FG_ZBREG_DEST, 1);

	ADD_RANGE(R300_SC_HYPERZ, 2);
	ADD_RANGE(0x43E8, 1);

	ADD_RANGE(0x46A4, 5);

	ADD_RANGE(R300_RE_FOG_STATE, 1);
	ADD_RANGE(R300_FOG_COLOR_R, 3);
	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
	ADD_RANGE(0x4BD8, 1);
	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
	ADD_RANGE(0x4E00, 1);
	ADD_RANGE(R300_RB3D_CBLEND, 2);
	ADD_RANGE(R300_RB3D_COLORMASK, 1);
	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
	ADD_RANGE(0x4E50, 9);
	ADD_RANGE(0x4E88, 1);
	ADD_RANGE(0x4EA0, 2);
	ADD_RANGE(R300_ZB_CNTL, 3);
	ADD_RANGE(R300_ZB_FORMAT, 4);
	ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
	ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
	ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
	ADD_RANGE(R300_ZB_ZPASS_DATA, 2);	/* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */

	ADD_RANGE(R300_TX_FILTER_0, 16);
	ADD_RANGE(R300_TX_FILTER1_0, 16);
	ADD_RANGE(R300_TX_SIZE_0, 16);
	ADD_RANGE(R300_TX_FORMAT_0, 16);
	ADD_RANGE(R300_TX_PITCH_0, 16);
	/* Texture offset is dangerous and needs more checking */
	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
	ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);

	/* Sporadic registers used as primitives are emitted */
	ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);

	/* Chip-family-specific ranges: R500 parts expose different fragment
	 * shader / rasterizer registers than the older R300/R400 family. */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
		ADD_RANGE(R500_US_CONFIG, 2);
		ADD_RANGE(R500_US_CODE_ADDR, 3);
		ADD_RANGE(R500_US_FC_CTRL, 1);
		ADD_RANGE(R500_RS_IP_0, 16);
		ADD_RANGE(R500_RS_INST_0, 16);
		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
	} else {
		ADD_RANGE(R300_PFS_CNTL_0, 3);
		ADD_RANGE(R300_PFS_NODE_0, 4);
		ADD_RANGE(R300_PFS_TEXI_0, 64);
		ADD_RANGE(R300_PFS_INSTR0_0, 64);
		ADD_RANGE(R300_PFS_INSTR1_0, 64);
		ADD_RANGE(R300_PFS_INSTR2_0, 64);
		ADD_RANGE(R300_PFS_INSTR3_0, 64);
		ADD_RANGE(R300_RS_INTERP_0, 8);
		ADD_RANGE(R300_RS_ROUTE_0, 8);

	}
}
282
283static __inline__ int r300_check_range(unsigned reg, int count)
284{
285 int i;
286 if (reg & ~0xffff)
287 return -1;
288 for (i = (reg >> 2); i < (reg >> 2) + count; i++)
289 if (r300_reg_flags[i] != MARK_SAFE)
290 return 1;
291 return 0;
292}
293
/**
 * Emit a packet0 whose register range contains at least one register that
 * is not unconditionally safe.  Each register is validated against
 * r300_reg_flags: MARK_SAFE values pass, MARK_CHECK_OFFSET values must be
 * valid GPU offsets, and anything else rejects the whole packet.
 *
 * Returns 0 on success or -EINVAL for an oversized packet, an offset that
 * fails the range check, or an unwritable register.
 */
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
							  dev_priv,
							  drm_radeon_kcmd_buffer_t
							  * cmdbuf,
							  drm_r300_cmd_header_t
							  header)
{
	int reg;
	int sz;
	int i;
	u32 *value;
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	/* The loop below indexes at most 64 dwords of buffer payload. */
	if ((sz > 64) || (sz < 0)) {
		DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
			  reg, sz);
		return -EINVAL;
	}

	for (i = 0; i < sz; i++) {
		switch (r300_reg_flags[(reg >> 2) + i]) {
		case MARK_SAFE:
			break;
		case MARK_CHECK_OFFSET:
			/* Payload dword is a GPU address: verify it before
			 * the hardware can see it. */
			value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
			if (!radeon_check_offset(dev_priv, *value)) {
				DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
					  reg, sz);
				return -EINVAL;
			}
			break;
		default:
			/* Unmarked register: userspace may not touch it. */
			DRM_ERROR("Register %04x failed check as flag=%02x\n",
				  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
			return -EINVAL;
		}
	}

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}
342
/**
 * Emits a packet0 setting arbitrary registers.
 * Called by r300_do_cp_cmdbuf.
 *
 * Note that checks are performed on contents and addresses of the registers
 *
 * Fast path: when every register in the window is MARK_SAFE the payload is
 * emitted directly; otherwise the slow per-register checking variant is
 * used.  Returns 0 on success or -EINVAL for a truncated payload or an
 * out-of-range register window.
 */
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int reg;
	int sz;
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	if (!sz)
		return 0;

	/* The payload the header promises must actually be present. */
	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;

	if (reg + sz * 4 >= 0x10000) {
		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
			  sz);
		return -EINVAL;
	}

	if (r300_check_range(reg, sz)) {
		/* go and check everything */
		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
							   header);
	}
	/* the rest of the data is safe to emit, whatever the values the user passed */

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}
386
/**
 * Uploads user-supplied vertex program instructions or parameters onto
 * the graphics card.
 * Called by r300_do_cp_cmdbuf.
 *
 * Returns 0 on success or -EINVAL when the command buffer does not hold
 * the sz * 16 bytes of payload the header promises.
 */
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
				    drm_radeon_kcmd_buffer_t *cmdbuf,
				    drm_r300_cmd_header_t header)
{
	int sz;
	int addr;
	RING_LOCALS;

	sz = header.vpu.count;
	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;

	if (!sz)
		return 0;
	/* Each VPU count unit is four dwords (16 bytes) of payload. */
	if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;

	/* VAP is very sensitive so we purge cache before we program it
	 * and we also flush its state before & after */
	BEGIN_RING(6);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
	OUT_RING(0);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	/* Upload: set the target address, then stream sz * 4 dwords. */
	BEGIN_RING(3 + sz * 4);
	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
	ADVANCE_RING();

	/* Flush PVS state again after the upload (see comment above). */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
	OUT_RING(0);
	ADVANCE_RING();

	return 0;
}
434
/**
 * Emit a clear packet from userspace.
 * Called by r300_emit_packet3.
 *
 * The 8 payload dwords are emitted as a single-vertex point draw via
 * R200_3D_DRAW_IMMD_2 (the clear appears to be implemented as that draw),
 * followed by a destination-cache flush and a 3D idle-clean wait.
 *
 * Returns 0 on success or -EINVAL if fewer than 8 dwords of payload remain.
 */
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
				      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	RING_LOCALS;

	if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;

	BEGIN_RING(10);
	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
	ADVANCE_RING();

	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	return 0;
}
465
/**
 * Validate and emit a 3D_LOAD_VBPNTR packet (vertex array pointers).
 *
 * Array entries come in pairs (with a possible single trailing entry); each
 * entry is an attribute dword followed by a GPU offset that is range-checked
 * with radeon_check_offset().
 *
 * Returns 0 on success or -EINVAL for an oversized payload, an offset that
 * fails the range check, or a narrays/count mismatch.
 */
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
					       drm_radeon_kcmd_buffer_t *cmdbuf,
					       u32 header)
{
	int count, i, k;
#define MAX_ARRAY_PACKET  64
	u32 *data;
	u32 narrays;
	RING_LOCALS;

	count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;

	if ((count + 1) > MAX_ARRAY_PACKET) {
		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
			  count);
		return -EINVAL;
	}
	/* carefully check packet contents */

	/* We have already read the header so advance the buffer. */
	drm_buffer_advance(cmdbuf->buffer, 4);

	/* First payload dword holds the number of arrays. */
	narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
	k = 0;		/* arrays validated so far */
	i = 1;		/* dword position inside the payload */
	while ((k < narrays) && (i < (count + 1))) {
		i++;		/* skip attribute field */
		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
		if (!radeon_check_offset(dev_priv, *data)) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
		if (k == narrays)
			break;
		/* have one more to process, they come in pairs */
		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
		if (!radeon_check_offset(dev_priv, *data)) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
	}
	/* do the counts match what we expect ? */
	if ((k != narrays) || (i != (count + 1))) {
		DRM_ERROR
		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
		     k, i, narrays, count + 1);
		return -EINVAL;
	}

	/* all clear, output packet */

	BEGIN_RING(count + 2);
	OUT_RING(header);
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
	ADVANCE_RING();

	return 0;
}
532
/**
 * Validate and emit a CNTL_BITBLT_MULTI packet.
 *
 * When bit 0x8000 of the first dword is set, the packet carries pitch/offset
 * control words; any source/destination offsets present are range-checked
 * with radeon_check_offset() before the packet is emitted verbatim.
 *
 * Returns 0 on success or -EINVAL if an offset fails the range check.
 */
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
					     drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
	int count, ret;
	RING_LOCALS;


	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;

	if (*cmd & 0x8000) {
		u32 offset;
		u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
		if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {

			/* Offsets are stored shifted right by 10 bits. */
			u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
			offset = *cmd2 << 10;
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
				return -EINVAL;
			}
		}

		/* With both SRC and DST control bits set, a second offset
		 * dword follows and must be checked as well. */
		if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
		    (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
			offset = *cmd3 << 10;
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
				return -EINVAL;
			}

		}
	}

	BEGIN_RING(count+2);
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
	ADVANCE_RING();

	return 0;
}
577
/**
 * Validate and emit a 3D_DRAW_INDX_2 packet.
 *
 * The packet's dword count must match the index count encoded in its second
 * dword (two 16-bit indices pack into one dword unless INDEX_SIZE_32bit is
 * set).  A zero count means the indices live in a separate buffer: in that
 * case the immediately following INDX_BUFFER packet is validated (header
 * type, register address, GPU offset, size) and emitted as well.
 *
 * Returns 0 on success or -EINVAL on any validation failure.
 */
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
	u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
	int count;
	int expected_count;
	RING_LOCALS;

	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;

	expected_count = *cmd1 >> 16;
	if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
		/* 16-bit indices: two per dword, rounded up. */
		expected_count = (expected_count+1)/2;

	if (count && count != expected_count) {
		DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
			  count, expected_count);
		return -EINVAL;
	}

	BEGIN_RING(count+2);
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
	ADVANCE_RING();

	if (!count) {
		drm_r300_cmd_header_t stack_header, *header;
		/* NOTE(review): this cmd1 shadows the outer cmd1 above. */
		u32 *cmd1, *cmd2, *cmd3;

		if (drm_buffer_unprocessed(cmdbuf->buffer)
		    < 4*4 + sizeof(stack_header)) {
			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
			return -EINVAL;
		}

		header = drm_buffer_read_object(cmdbuf->buffer,
				sizeof(stack_header), &stack_header);

		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
		cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
		cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
		cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);

		if (header->header.cmd_type != R300_CMD_PACKET3 ||
		    header->packet3.packet != R300_CMD_PACKET3_RAW ||
		    *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
			return -EINVAL;
		}

		/* Second dword must target the index-buffer register. */
		if ((*cmd1 & 0x8000ffff) != 0x80000810) {
			DRM_ERROR("Invalid indx_buffer reg address %08X\n",
				  *cmd1);
			return -EINVAL;
		}
		if (!radeon_check_offset(dev_priv, *cmd2)) {
			DRM_ERROR("Invalid indx_buffer offset is %08X\n",
				  *cmd2);
			return -EINVAL;
		}
		if (*cmd3 != expected_count) {
			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
				  *cmd3, expected_count);
			return -EINVAL;
		}

		BEGIN_RING(4);
		OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
		ADVANCE_RING();
	}

	return 0;
}
651
/**
 * Validate and emit a raw packet3 from the command buffer.
 *
 * Dispatches on the packet type: vertex-array and blit packets get their
 * own offset-checking emitters, draw packets clear the flush/purge
 * tracking flags, NOP/WAIT pass straight through, and anything unknown is
 * rejected.
 *
 * Returns 0 on success or -EINVAL for a malformed or unknown packet.
 */
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *header;
	int count;
	RING_LOCALS;

	if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;

	/* Fixme !! This simply emits a packet without much checking.
	   We need to be smarter. */

	/* obtain first word - actual packet3 header */
	header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);

	/* Is it packet 3 ? */
	if ((*header >> 30) != 0x3) {
		DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
		return -EINVAL;
	}

	count = (*header >> 16) & 0x3fff;

	/* Check again now that we know how much data to expect */
	if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
		DRM_ERROR
		    ("Expected packet3 of length %d but have only %d bytes left\n",
		     (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
		return -EINVAL;
	}

	/* Is it a packet type we know about ? */
	switch (*header & 0xff00) {
	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);

	case RADEON_CNTL_BITBLT_MULTI:
		return r300_emit_bitblt_multi(dev_priv, cmdbuf);

	case RADEON_CP_INDX_BUFFER:
		/* Only legal directly after 3D_DRAW_INDX_2, where it is
		 * consumed by r300_emit_draw_indx_2(). */
		DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
		return -EINVAL;
	case RADEON_CP_3D_DRAW_IMMD_2:
		/* triggers drawing using in-packet vertex data */
	case RADEON_CP_3D_DRAW_VBUF_2:
		/* triggers drawing of vertex buffers setup elsewhere */
		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
					   RADEON_PURGE_EMITED);
		break;
	case RADEON_CP_3D_DRAW_INDX_2:
		/* triggers drawing using indices to vertex buffer */
		/* whenever we send vertex we clear flush & purge */
		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
					   RADEON_PURGE_EMITED);
		return r300_emit_draw_indx_2(dev_priv, cmdbuf);
	case RADEON_WAIT_FOR_IDLE:
	case RADEON_CP_NOP:
		/* these packets are safe */
		break;
	default:
		DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
		return -EINVAL;
	}

	BEGIN_RING(count + 2);
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
	ADVANCE_RING();

	return 0;
}
723
/**
 * Emit a rendering packet3 from userspace.
 * Called by r300_do_cp_cmdbuf.
 *
 * When there are more than R300_SIMULTANEOUS_CLIPRECTS cliprects, the
 * packet is re-emitted once per cliprect group, with the buffer iterator
 * rewound to orig_iter before each pass so every pass reads the same
 * payload.  The do-while guarantees at least one emission even with
 * nbox == 0.
 *
 * Returns 0 on success or a negative errno from the per-type emitters.
 */
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int n;
	int ret;
	int orig_iter = cmdbuf->buffer->iterator;

	/* This is a do-while-loop so that we run the interior at least once,
	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
	 */
	n = 0;
	do {
		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
			if (ret)
				return ret;

			/* Rewind so this pass re-reads the same packet. */
			cmdbuf->buffer->iterator = orig_iter;
		}

		switch (header.packet3.packet) {
		case R300_CMD_PACKET3_CLEAR:
			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
			ret = r300_emit_clear(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_clear failed\n");
				return ret;
			}
			break;

		case R300_CMD_PACKET3_RAW:
			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_raw_packet3 failed\n");
				return ret;
			}
			break;

		default:
			DRM_ERROR("bad packet3 type %i at byte %d\n",
				  header.packet3.packet,
				  cmdbuf->buffer->iterator - (int)sizeof(header));
			return -EINVAL;
		}

		n += R300_SIMULTANEOUS_CLIPRECTS;
	} while (n < cmdbuf->nbox);

	return 0;
}
780
781/* Some of the R300 chips seem to be extremely touchy about the two registers
782 * that are configured in r300_pacify.
783 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
784 * sends a command buffer that contains only state setting commands and a
785 * vertex program/parameter upload sequence, this will eventually lead to a
786 * lockup, unless the sequence is bracketed by calls to r300_pacify.
787 * So we should take great care to *always* call r300_pacify before
788 * *anything* 3D related, and again afterwards. This is what the
789 * call bracket in r300_do_cp_cmdbuf is for.
790 */
791
/**
 * Emit the sequence to pacify R300.
 *
 * Flushes (and, if no purge has been emitted since the last draw, also
 * frees) the Z, 3D and 2D caches, invalidates texture tags, and waits for
 * engine idle.  Sets both RADEON_FLUSH_EMITED and RADEON_PURGE_EMITED in
 * dev_priv->track_flush when done.
 */
static void r300_pacify(drm_radeon_private_t *dev_priv)
{
	uint32_t cache_z, cache_3d, cache_2d;
	RING_LOCALS;

	cache_z = R300_ZC_FLUSH;
	cache_2d = R300_RB2D_DC_FLUSH;
	cache_3d = R300_RB3D_DC_FLUSH;
	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
		/* we can purge: primitives were drawn since the last purge */
		cache_z |= R300_ZC_FREE;
		cache_2d |= R300_RB2D_DC_FREE;
		cache_3d |= R300_RB3D_DC_FREE;
	}

	/* flush & purge zbuffer */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
	OUT_RING(cache_z);
	ADVANCE_RING();
	/* flush & purge 3d */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(cache_3d);
	ADVANCE_RING();
	/* flush & purge texture */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
	OUT_RING(0);
	ADVANCE_RING();
	/* FIXME: is this one really needed ? */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
	OUT_RING(0);
	ADVANCE_RING();
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* flush & purge 2d through E2 as RB2D will trigger lockup */
	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
	OUT_RING(cache_2d);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
		 RADEON_WAIT_HOST_IDLECLEAN);
	ADVANCE_RING();
	/* set flush & purge flags */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
845
846/**
847 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
848 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
849 * be careful about how this function is called.
850 */
851static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
852{
853 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
854 struct drm_radeon_master_private *master_priv = master->driver_priv;
855
856 buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
857 buf->pending = 1;
858 buf->used = 0;
859}
860
861static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
862 drm_r300_cmd_header_t header)
863{
864 u32 wait_until;
865 RING_LOCALS;
866
867 if (!header.wait.flags)
868 return;
869
870 wait_until = 0;
871
872 switch(header.wait.flags) {
873 case R300_WAIT_2D:
874 wait_until = RADEON_WAIT_2D_IDLE;
875 break;
876 case R300_WAIT_3D:
877 wait_until = RADEON_WAIT_3D_IDLE;
878 break;
879 case R300_NEW_WAIT_2D_3D:
880 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
881 break;
882 case R300_NEW_WAIT_2D_2D_CLEAN:
883 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
884 break;
885 case R300_NEW_WAIT_3D_3D_CLEAN:
886 wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
887 break;
888 case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
889 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
890 wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
891 break;
892 default:
893 return;
894 }
895
896 BEGIN_RING(2);
897 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
898 OUT_RING(wait_until);
899 ADVANCE_RING();
900}
901
/* Handle an R300_CMD_SCRATCH command: bump the chosen scratch-register age,
 * publish it to a userspace-supplied array of {age, pending} pairs, and emit
 * the new age into the hardware scratch register.
 *
 * The command payload is a 64-bit user pointer followed by n_bufs 32-bit
 * buffer indices.  Returns 0 on success, -EINVAL on a short/invalid payload,
 * a bad register index, or a faulting user copy.
 */
static int r300_scratch(drm_radeon_private_t *dev_priv,
			drm_radeon_kcmd_buffer_t *cmdbuf,
			drm_r300_cmd_header_t header)
{
	u32 *ref_age_base;
	u32 i, *buf_idx, h_pending;
	u64 *ptr_addr;
	u64 stack_ptr_addr;
	RING_LOCALS;

	/* payload must hold the user pointer plus one index per buffer */
	if (drm_buffer_unprocessed(cmdbuf->buffer) <
	    (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
		return -EINVAL;
	}

	/* only scratch registers 0..4 are valid here */
	if (header.scratch.reg >= 5) {
		return -EINVAL;
	}

	dev_priv->scratch_ages[header.scratch.reg]++;

	/* fetch the userspace base pointer of the ref-age array; it is a
	 * user-virtual address, so all accesses below go through
	 * copy_to_user/copy_from_user */
	ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
			sizeof(stack_ptr_addr), &stack_ptr_addr);
	ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);

	for (i=0; i < header.scratch.n_bufs; i++) {
		buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
		*buf_idx *= 2; /* 8 bytes (2 dwords: age, pending) per buf */

		/* write the new age into the user array */
		if (copy_to_user(ref_age_base + *buf_idx,
				&dev_priv->scratch_ages[header.scratch.reg],
				sizeof(u32)))
			return -EINVAL;

		/* read and decrement the user-side pending count */
		if (copy_from_user(&h_pending,
				ref_age_base + *buf_idx + 1,
				sizeof(u32)))
			return -EINVAL;

		/* a zero pending count means userspace state is inconsistent */
		if (h_pending == 0)
			return -EINVAL;

		h_pending--;

		if (copy_to_user(ref_age_base + *buf_idx + 1,
				&h_pending,
				sizeof(u32)))
			return -EINVAL;

		drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
	}

	/* finally emit the new age to the hardware scratch register */
	BEGIN_RING(2);
	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
	ADVANCE_RING();

	return 0;
}
961
962/**
963 * Uploads user-supplied vertex program instructions or parameters onto
964 * the graphics card.
965 * Called by r300_do_cp_cmdbuf.
966 */
967static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
968 drm_radeon_kcmd_buffer_t *cmdbuf,
969 drm_r300_cmd_header_t header)
970{
971 int sz;
972 int addr;
973 int type;
974 int isclamp;
975 int stride;
976 RING_LOCALS;
977
978 sz = header.r500fp.count;
979 /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
980 addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
981
982 type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
983 isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
984
985 addr |= (type << 16);
986 addr |= (isclamp << 17);
987
988 stride = type ? 4 : 6;
989
990 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
991 if (!sz)
992 return 0;
993 if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
994 return -EINVAL;
995
996 BEGIN_RING(3 + sz * stride);
997 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
998 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
999 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
1000
1001 ADVANCE_RING();
1002
1003 return 0;
1004}
1005
1006
1007/**
1008 * Parses and validates a user-supplied command buffer and emits appropriate
1009 * commands on the DMA ring buffer.
1010 * Called by the ioctl handler function radeon_cp_cmdbuf.
1011 */
1012int r300_do_cp_cmdbuf(struct drm_device *dev,
1013 struct drm_file *file_priv,
1014 drm_radeon_kcmd_buffer_t *cmdbuf)
1015{
1016 drm_radeon_private_t *dev_priv = dev->dev_private;
1017 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
1018 struct drm_device_dma *dma = dev->dma;
1019 struct drm_buf *buf = NULL;
1020 int emit_dispatch_age = 0;
1021 int ret = 0;
1022
1023 DRM_DEBUG("\n");
1024
1025 /* pacify */
1026 r300_pacify(dev_priv);
1027
1028 if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
1029 ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
1030 if (ret)
1031 goto cleanup;
1032 }
1033
1034 while (drm_buffer_unprocessed(cmdbuf->buffer)
1035 >= sizeof(drm_r300_cmd_header_t)) {
1036 int idx;
1037 drm_r300_cmd_header_t *header, stack_header;
1038
1039 header = drm_buffer_read_object(cmdbuf->buffer,
1040 sizeof(stack_header), &stack_header);
1041
1042 switch (header->header.cmd_type) {
1043 case R300_CMD_PACKET0:
1044 DRM_DEBUG("R300_CMD_PACKET0\n");
1045 ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
1046 if (ret) {
1047 DRM_ERROR("r300_emit_packet0 failed\n");
1048 goto cleanup;
1049 }
1050 break;
1051
1052 case R300_CMD_VPU:
1053 DRM_DEBUG("R300_CMD_VPU\n");
1054 ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
1055 if (ret) {
1056 DRM_ERROR("r300_emit_vpu failed\n");
1057 goto cleanup;
1058 }
1059 break;
1060
1061 case R300_CMD_PACKET3:
1062 DRM_DEBUG("R300_CMD_PACKET3\n");
1063 ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
1064 if (ret) {
1065 DRM_ERROR("r300_emit_packet3 failed\n");
1066 goto cleanup;
1067 }
1068 break;
1069
1070 case R300_CMD_END3D:
1071 DRM_DEBUG("R300_CMD_END3D\n");
1072 /* TODO:
1073 Ideally userspace driver should not need to issue this call,
1074 i.e. the drm driver should issue it automatically and prevent
1075 lockups.
1076
1077 In practice, we do not understand why this call is needed and what
1078 it does (except for some vague guesses that it has to do with cache
1079 coherence) and so the user space driver does it.
1080
1081 Once we are sure which uses prevent lockups the code could be moved
1082 into the kernel and the userspace driver will not
1083 need to use this command.
1084
1085 Note that issuing this command does not hurt anything
1086 except, possibly, performance */
1087 r300_pacify(dev_priv);
1088 break;
1089
1090 case R300_CMD_CP_DELAY:
1091 /* simple enough, we can do it here */
1092 DRM_DEBUG("R300_CMD_CP_DELAY\n");
1093 {
1094 int i;
1095 RING_LOCALS;
1096
1097 BEGIN_RING(header->delay.count);
1098 for (i = 0; i < header->delay.count; i++)
1099 OUT_RING(RADEON_CP_PACKET2);
1100 ADVANCE_RING();
1101 }
1102 break;
1103
1104 case R300_CMD_DMA_DISCARD:
1105 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
1106 idx = header->dma.buf_idx;
1107 if (idx < 0 || idx >= dma->buf_count) {
1108 DRM_ERROR("buffer index %d (of %d max)\n",
1109 idx, dma->buf_count - 1);
1110 ret = -EINVAL;
1111 goto cleanup;
1112 }
1113
1114 buf = dma->buflist[idx];
1115 if (buf->file_priv != file_priv || buf->pending) {
1116 DRM_ERROR("bad buffer %p %p %d\n",
1117 buf->file_priv, file_priv,
1118 buf->pending);
1119 ret = -EINVAL;
1120 goto cleanup;
1121 }
1122
1123 emit_dispatch_age = 1;
1124 r300_discard_buffer(dev, file_priv->master, buf);
1125 break;
1126
1127 case R300_CMD_WAIT:
1128 DRM_DEBUG("R300_CMD_WAIT\n");
1129 r300_cmd_wait(dev_priv, *header);
1130 break;
1131
1132 case R300_CMD_SCRATCH:
1133 DRM_DEBUG("R300_CMD_SCRATCH\n");
1134 ret = r300_scratch(dev_priv, cmdbuf, *header);
1135 if (ret) {
1136 DRM_ERROR("r300_scratch failed\n");
1137 goto cleanup;
1138 }
1139 break;
1140
1141 case R300_CMD_R500FP:
1142 if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
1143 DRM_ERROR("Calling r500 command on r300 card\n");
1144 ret = -EINVAL;
1145 goto cleanup;
1146 }
1147 DRM_DEBUG("R300_CMD_R500FP\n");
1148 ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
1149 if (ret) {
1150 DRM_ERROR("r300_emit_r500fp failed\n");
1151 goto cleanup;
1152 }
1153 break;
1154 default:
1155 DRM_ERROR("bad cmd_type %i at byte %d\n",
1156 header->header.cmd_type,
1157 cmdbuf->buffer->iterator - (int)sizeof(*header));
1158 ret = -EINVAL;
1159 goto cleanup;
1160 }
1161 }
1162
1163 DRM_DEBUG("END\n");
1164
1165 cleanup:
1166 r300_pacify(dev_priv);
1167
1168 /* We emit the vertex buffer age here, outside the pacifier "brackets"
1169 * for two reasons:
1170 * (1) This may coalesce multiple age emissions into a single one and
1171 * (2) more importantly, some chips lock up hard when scratch registers
1172 * are written inside the pacifier bracket.
1173 */
1174 if (emit_dispatch_age) {
1175 RING_LOCALS;
1176
1177 /* Emit the vertex buffer age */
1178 BEGIN_RING(2);
1179 RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
1180 ADVANCE_RING();
1181 }
1182
1183 COMMIT_RING();
1184
1185 return ret;
1186}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
deleted file mode 100644
index daf7572be976..000000000000
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ /dev/null
@@ -1,874 +0,0 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 *
26 * ------------------------ This file is DEPRECATED! -------------------------
27 */
28#include <drm/drmP.h>
29#include <drm/radeon_drm.h>
30#include "radeon_drv.h"
31
32#include "r600_blit_shaders.h"
33
34/* 23 bits of float fractional data */
35#define I2F_FRAC_BITS 23
36#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
37
38/*
39 * Converts unsigned integer into 32-bit IEEE floating point representation.
40 * Will be exact from 0 to 2^24. Above that, we round towards zero
41 * as the fractional bits will not fit in a float. (It would be better to
42 * round towards even as the fpu does, but that is slower.)
43 */
44static __pure uint32_t int2float(uint32_t x)
45{
46 uint32_t msb, exponent, fraction;
47
48 /* Zero is special */
49 if (!x) return 0;
50
51 /* Get location of the most significant bit */
52 msb = __fls(x);
53
54 /*
55 * Use a rotate instead of a shift because that works both leftwards
56 * and rightwards due to the mod(32) behaviour. This means we don't
57 * need to check to see if we are above 2^24 or not.
58 */
59 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
60 exponent = (127 + msb) << I2F_FRAC_BITS;
61
62 return fraction + exponent;
63}
64
65#define DI_PT_RECTLIST 0x11
66#define DI_INDEX_SIZE_16_BIT 0x0
67#define DI_SRC_SEL_AUTO_INDEX 0x2
68
69#define FMT_8 0x1
70#define FMT_5_6_5 0x8
71#define FMT_8_8_8_8 0x1a
72#define COLOR_8 0x1
73#define COLOR_5_6_5 0x8
74#define COLOR_8_8_8_8 0x1a
75
/* Program color buffer 0 as the render target for the blit: base address,
 * pitch/slice size, view, format info and the (cleared) tile/frag/mask
 * registers.  R600-family chips below RV770 additionally need a
 * SURFACE_BASE_UPDATE packet after setting the CB base.
 */
static void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* hardware wants the height rounded up to a multiple of 8, min 8 */
	h = ALIGN(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = ((format << 2) | (1 << 27));
	/* pitch in units of 8 pixels, slice in units of 64 pixels (minus one) */
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
	    ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
		/* +2 dwords for the extra SURFACE_BASE_UPDATE packet */
		BEGIN_RING(21 + 2);
		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
		OUT_RING(gpu_addr >> 8);
		OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		OUT_RING(2 << 0);
	} else {
		BEGIN_RING(21);
		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
		OUT_RING(gpu_addr >> 8);
	}

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((pitch << 0) | (slice << 10));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(cb_color_info);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	ADVANCE_RING();
}
133
134static void
135cp_set_surface_sync(drm_radeon_private_t *dev_priv,
136 u32 sync_type, u32 size, u64 mc_addr)
137{
138 u32 cp_coher_size;
139 RING_LOCALS;
140 DRM_DEBUG("\n");
141
142 if (size == 0xffffffff)
143 cp_coher_size = 0xffffffff;
144 else
145 cp_coher_size = ((size + 255) >> 8);
146
147 BEGIN_RING(5);
148 OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
149 OUT_RING(sync_type);
150 OUT_RING(cp_coher_size);
151 OUT_RING((mc_addr >> 8));
152 OUT_RING(10); /* poll interval */
153 ADVANCE_RING();
154}
155
/* Copy the precompiled r6xx blit vertex and pixel shaders into the blit
 * vertex buffer (VS at offset 0, PS at offset 256), then program the
 * SQ shader start/resource/offset registers to point at them and emit a
 * surface sync so the shader fetch sees the fresh data.
 */
static void
set_shaders(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u64 gpu_addr;
	int i;
	u32 *vs, *ps;
	uint32_t sq_pgm_resources;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* load shaders into the CPU-visible mapping of the blit buffer */
	vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);

	/* shader dwords must be little-endian as seen by the GPU */
	for (i = 0; i < r6xx_vs_size; i++)
		vs[i] = cpu_to_le32(r6xx_vs[i]);
	for (i = 0; i < r6xx_ps_size; i++)
		ps[i] = cpu_to_le32(r6xx_ps[i]);

	dev_priv->blit_vb->used = 512;

	gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;

	/* setup shader regs: 1 GPR for each shader stage */
	sq_pgm_resources = (1 << 0);

	BEGIN_RING(9 + 12);
	/* VS */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(gpu_addr >> 8);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(sq_pgm_resources);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	/* PS (starts 256 bytes past the VS) */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((gpu_addr + 256) >> 8);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(sq_pgm_resources | (1 << 28));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(2);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);
	ADVANCE_RING();

	/* make the 512 bytes of shader code visible to the shader fetcher */
	cp_set_surface_sync(dev_priv,
			    R600_SH_ACTION_ENA, 512, gpu_addr);
}
218
219static void
220set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
221{
222 uint32_t sq_vtx_constant_word2;
223 RING_LOCALS;
224 DRM_DEBUG("\n");
225
226 sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
227#ifdef __BIG_ENDIAN
228 sq_vtx_constant_word2 |= (2 << 30);
229#endif
230
231 BEGIN_RING(9);
232 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
233 OUT_RING(0x460);
234 OUT_RING(gpu_addr & 0xffffffff);
235 OUT_RING(48 - 1);
236 OUT_RING(sq_vtx_constant_word2);
237 OUT_RING(1 << 0);
238 OUT_RING(0);
239 OUT_RING(0);
240 OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
241 ADVANCE_RING();
242
243 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
244 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
245 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
246 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
247 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
248 cp_set_surface_sync(dev_priv,
249 R600_TC_ACTION_ENA, 48, gpu_addr);
250 else
251 cp_set_surface_sync(dev_priv,
252 R600_VC_ACTION_ENA, 48, gpu_addr);
253}
254
255static void
256set_tex_resource(drm_radeon_private_t *dev_priv,
257 int format, int w, int h, int pitch, u64 gpu_addr)
258{
259 uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
260 RING_LOCALS;
261 DRM_DEBUG("\n");
262
263 if (h < 1)
264 h = 1;
265
266 sq_tex_resource_word0 = (1 << 0);
267 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
268 ((w - 1) << 19));
269
270 sq_tex_resource_word1 = (format << 26);
271 sq_tex_resource_word1 |= ((h - 1) << 0);
272
273 sq_tex_resource_word4 = ((1 << 14) |
274 (0 << 16) |
275 (1 << 19) |
276 (2 << 22) |
277 (3 << 25));
278
279 BEGIN_RING(9);
280 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
281 OUT_RING(0);
282 OUT_RING(sq_tex_resource_word0);
283 OUT_RING(sq_tex_resource_word1);
284 OUT_RING(gpu_addr >> 8);
285 OUT_RING(gpu_addr >> 8);
286 OUT_RING(sq_tex_resource_word4);
287 OUT_RING(0);
288 OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
289 ADVANCE_RING();
290
291}
292
293static void
294set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
295{
296 RING_LOCALS;
297 DRM_DEBUG("\n");
298
299 BEGIN_RING(12);
300 OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
301 OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
302 OUT_RING((x1 << 0) | (y1 << 16));
303 OUT_RING((x2 << 0) | (y2 << 16));
304
305 OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
306 OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
307 OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
308 OUT_RING((x2 << 0) | (y2 << 16));
309
310 OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
311 OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
312 OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
313 OUT_RING((x2 << 0) | (y2 << 16));
314 ADVANCE_RING();
315}
316
/* Kick off the blit draw: a single auto-indexed RECTLIST of 3 vertices.
 * Commits the ring so the draw actually starts.
 */
static void
draw_auto(drm_radeon_private_t *dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(10);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
	OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(DI_PT_RECTLIST);

	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
#ifdef __BIG_ENDIAN
	/* request byte-swapped index/vertex fetch on big-endian hosts */
	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
#else
	OUT_RING(DI_INDEX_SIZE_16_BIT);
#endif

	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
	OUT_RING(1);

	/* 3 auto-generated indices for the rect list */
	OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
	OUT_RING(3);
	OUT_RING(DI_SRC_SEL_AUTO_INDEX);

	ADVANCE_RING();
	COMMIT_RING();
}
345
/* Emit the baseline 3D state for the blit: the canned r6xx/r7xx default
 * state block, a cache flush-and-invalidate event, and the SQ (shader
 * queue) configuration with per-chip GPR/thread/stack partitioning.
 *
 * The per-family numbers below come straight from the hardware resource
 * limits of each ASIC; do not "simplify" them.
 */
static void
set_default_state(drm_radeon_private_t *dev_priv)
{
	int i;
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	RING_LOCALS;

	/* pick GPR/thread/stack partitioning for this ASIC family */
	switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
	case CHIP_R600:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 40;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_gprs = 144;
		num_vs_gprs = 40;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 48;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	/* chips without a vertex cache must not enable it */
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = R600_VC_ENABLE;

	sq_config |= (R600_DX9_CONSTS |
		      R600_ALU_INST_PREFER_VECTOR |
		      R600_PS_PRIO(0) |
		      R600_VS_PRIO(1) |
		      R600_GS_PRIO(2) |
		      R600_ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
				  R600_NUM_VS_GPRS(num_vs_gprs) |
				  R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
				  R600_NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
				   R600_NUM_VS_THREADS(num_vs_threads) |
				   R600_NUM_GS_THREADS(num_gs_threads) |
				   R600_NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	/* canned default state differs between r6xx and r7xx */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
		BEGIN_RING(r7xx_default_size + 10);
		for (i = 0; i < r7xx_default_size; i++)
			OUT_RING(r7xx_default_state[i]);
	} else {
		BEGIN_RING(r6xx_default_size + 10);
		for (i = 0; i < r6xx_default_size; i++)
			OUT_RING(r6xx_default_state[i]);
	}
	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
	/* SQ config: 6 consecutive registers starting at R600_SQ_CONFIG */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
	OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(sq_config);
	OUT_RING(sq_gpr_resource_mgmt_1);
	OUT_RING(sq_gpr_resource_mgmt_2);
	OUT_RING(sq_thread_resource_mgmt);
	OUT_RING(sq_stack_resource_mgmt_1);
	OUT_RING(sq_stack_resource_mgmt_2);
	ADVANCE_RING();
}
523
524static int r600_nomm_get_vb(struct drm_device *dev)
525{
526 drm_radeon_private_t *dev_priv = dev->dev_private;
527 dev_priv->blit_vb = radeon_freelist_get(dev);
528 if (!dev_priv->blit_vb) {
529 DRM_ERROR("Unable to allocate vertex buffer for blit\n");
530 return -EAGAIN;
531 }
532 return 0;
533}
534
/* Return the blit vertex buffer to the pool: reset its used byte count,
 * then hand it to the generic discard path (which ages it for reuse).
 */
static void r600_nomm_put_vb(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->blit_vb->used = 0;
	radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
}
542
543static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
544{
545 drm_radeon_private_t *dev_priv = dev->dev_private;
546 return (((char *)dev->agp_buffer_map->handle +
547 dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
548}
549
550int
551r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
552{
553 drm_radeon_private_t *dev_priv = dev->dev_private;
554 int ret;
555 DRM_DEBUG("\n");
556
557 ret = r600_nomm_get_vb(dev);
558 if (ret)
559 return ret;
560
561 dev_priv->blit_vb->file_priv = file_priv;
562
563 set_default_state(dev_priv);
564 set_shaders(dev);
565
566 return 0;
567}
568
569
/* Finish a blit sequence: flush/invalidate caches, wait for the 3D engine
 * to go idle and clean, commit the ring, and release the vertex buffer.
 */
void
r600_done_blit_copy(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5);
	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);

	ADVANCE_RING();
	COMMIT_RING();

	r600_nomm_put_vb(dev);
}
590
/* GPU copy of size_bytes from src_gpu_addr to dst_gpu_addr using the 3D
 * engine.  If any of size/src/dst is not 4-byte aligned the copy is done
 * byte-wise (FMT_8, 8 KiB max per row); otherwise dword-wise (FMT_8_8_8_8,
 * 32 KiB max per row).  Each iteration carves the remaining range into an
 * up-to-8192-row rectangle, draws it as a textured blit, and advances.
 *
 * NOTE(review): the two loop bodies are deliberately near-identical; exact
 * statement order matters (vb writes before resource setup before draw), so
 * they are kept unrolled rather than factored.
 */
void
r600_blit_copy(struct drm_device *dev,
	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
	       int size_bytes)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int max_bytes;
	u64 vb_addr;
	u32 *vb;

	vb = r600_nomm_get_vb_ptr(dev);

	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		/* unaligned path: one byte per texel */
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;
			/* surfaces must start on a 256-byte boundary; fold the
			 * low bits into the x offset instead */
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				/* fully aligned rows: copy many rows at once */
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				/* clamp so neither row overruns max_bytes */
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			/* need 48 bytes (12 dwords) for this rect's vertices */
			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {

				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
			}

			/* three vertices: (x, y, u, v) per vertex */
			vb[0] = int2float(dst_x);
			vb[1] = 0;
			vb[2] = int2float(src_x);
			vb[3] = 0;

			vb[4] = int2float(dst_x);
			vb[5] = int2float(h);
			vb[6] = int2float(src_x);
			vb[7] = int2float(h);

			vb[8] = int2float(dst_x + cur_size);
			vb[9] = int2float(h);
			vb[10] = int2float(src_x + cur_size);
			vb[11] = int2float(h);

			/* src */
			set_tex_resource(dev_priv, FMT_8,
					 src_x + cur_size, h, src_x + cur_size,
					 src_gpu_addr);

			cp_set_surface_sync(dev_priv,
					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst */
			set_render_target(dev_priv, COLOR_8,
					  dst_x + cur_size, h,
					  dst_gpu_addr);

			/* scissors */
			set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);

			/* Vertex buffer setup */
			vb_addr = dev_priv->gart_buffers_offset +
				dev_priv->blit_vb->offset +
				dev_priv->blit_vb->used;
			set_vtx_resource(dev_priv, vb_addr);

			/* draw */
			draw_auto(dev_priv);

			cp_set_surface_sync(dev_priv,
					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			vb += 12;
			dev_priv->blit_vb->used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	} else {
		/* aligned path: four bytes per texel */
		max_bytes = 8192 * 4;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = (src_gpu_addr & 255);
			int dst_x = (dst_gpu_addr & 255);
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;

				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
			}

			/* byte offsets become texel offsets (4 bytes/texel) */
			vb[0] = int2float(dst_x / 4);
			vb[1] = 0;
			vb[2] = int2float(src_x / 4);
			vb[3] = 0;

			vb[4] = int2float(dst_x / 4);
			vb[5] = int2float(h);
			vb[6] = int2float(src_x / 4);
			vb[7] = int2float(h);

			vb[8] = int2float((dst_x + cur_size) / 4);
			vb[9] = int2float(h);
			vb[10] = int2float((src_x + cur_size) / 4);
			vb[11] = int2float(h);

			/* src */
			set_tex_resource(dev_priv, FMT_8_8_8_8,
					 (src_x + cur_size) / 4,
					 h, (src_x + cur_size) / 4,
					 src_gpu_addr);

			cp_set_surface_sync(dev_priv,
					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst */
			set_render_target(dev_priv, COLOR_8_8_8_8,
					  (dst_x + cur_size) / 4, h,
					  dst_gpu_addr);

			/* scissors */
			set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h);

			/* Vertex buffer setup */
			vb_addr = dev_priv->gart_buffers_offset +
				dev_priv->blit_vb->offset +
				dev_priv->blit_vb->used;
			set_vtx_resource(dev_priv, vb_addr);

			/* draw */
			draw_auto(dev_priv);

			cp_set_surface_sync(dev_priv,
					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			vb += 12;
			dev_priv->blit_vb->used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	}
}
784
/* GPU copy of a w x h pixel rectangle from (sx,sy) in the source surface to
 * (dx,dy) in the destination, used for page-flip/swap blits.  cpp selects
 * the pixel format (4 -> 8888, 2 -> 565, otherwise 8-bit).  Pitches are in
 * bytes; the texture/render-target widths are pitch/cpp pixels.
 */
void
r600_blit_swap(struct drm_device *dev,
	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
	       int sx, int sy, int dx, int dy,
	       int w, int h, int src_pitch, int dst_pitch, int cpp)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int cb_format, tex_format;
	int sx2, sy2, dx2, dy2;
	u64 vb_addr;
	u32 *vb;

	/* need 48 bytes (12 dwords) of vertex space; recycle if short */
	if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {

		r600_nomm_put_vb(dev);
		r600_nomm_get_vb(dev);
		if (!dev_priv->blit_vb)
			return;

		set_shaders(dev);
	}
	vb = r600_nomm_get_vb_ptr(dev);

	sx2 = sx + w;
	sy2 = sy + h;
	dx2 = dx + w;
	dy2 = dy + h;

	/* three vertices: (x, y, u, v) per vertex */
	vb[0] = int2float(dx);
	vb[1] = int2float(dy);
	vb[2] = int2float(sx);
	vb[3] = int2float(sy);

	vb[4] = int2float(dx);
	vb[5] = int2float(dy2);
	vb[6] = int2float(sx);
	vb[7] = int2float(sy2);

	vb[8] = int2float(dx2);
	vb[9] = int2float(dy2);
	vb[10] = int2float(sx2);
	vb[11] = int2float(sy2);

	switch(cpp) {
	case 4:
		cb_format = COLOR_8_8_8_8;
		tex_format = FMT_8_8_8_8;
		break;
	case 2:
		cb_format = COLOR_5_6_5;
		tex_format = FMT_5_6_5;
		break;
	default:
		cb_format = COLOR_8;
		tex_format = FMT_8;
		break;
	}

	/* src */
	set_tex_resource(dev_priv, tex_format,
			 src_pitch / cpp,
			 sy2, src_pitch / cpp,
			 src_gpu_addr);

	cp_set_surface_sync(dev_priv,
			    R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);

	/* dst */
	set_render_target(dev_priv, cb_format,
			  dst_pitch / cpp, dy2,
			  dst_gpu_addr);

	/* scissors */
	set_scissors(dev_priv, dx, dy, dx2, dy2);

	/* Vertex buffer setup */
	vb_addr = dev_priv->gart_buffers_offset +
		dev_priv->blit_vb->offset +
		dev_priv->blit_vb->used;
	set_vtx_resource(dev_priv, vb_addr);

	/* draw */
	draw_auto(dev_priv);

	cp_set_surface_sync(dev_priv,
			    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
			    dst_pitch * dy2, dst_gpu_addr);

	dev_priv->blit_vb->used += 12 * 4;
}
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
deleted file mode 100644
index e231eeafef23..000000000000
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ /dev/null
@@ -1,2660 +0,0 @@
1/*
2 * Copyright 2008-2009 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Dave Airlie <airlied@redhat.com>
26 * Alex Deucher <alexander.deucher@amd.com>
27 *
28 * ------------------------ This file is DEPRECATED! -------------------------
29 */
30
31#include <linux/module.h>
32
33#include <drm/drmP.h>
34#include <drm/radeon_drm.h>
35#include "radeon_drv.h"
36
37#define PFP_UCODE_SIZE 576
38#define PM4_UCODE_SIZE 1792
39#define R700_PFP_UCODE_SIZE 848
40#define R700_PM4_UCODE_SIZE 1360
41
42/* Firmware Names */
43MODULE_FIRMWARE("radeon/R600_pfp.bin");
44MODULE_FIRMWARE("radeon/R600_me.bin");
45MODULE_FIRMWARE("radeon/RV610_pfp.bin");
46MODULE_FIRMWARE("radeon/RV610_me.bin");
47MODULE_FIRMWARE("radeon/RV630_pfp.bin");
48MODULE_FIRMWARE("radeon/RV630_me.bin");
49MODULE_FIRMWARE("radeon/RV620_pfp.bin");
50MODULE_FIRMWARE("radeon/RV620_me.bin");
51MODULE_FIRMWARE("radeon/RV635_pfp.bin");
52MODULE_FIRMWARE("radeon/RV635_me.bin");
53MODULE_FIRMWARE("radeon/RV670_pfp.bin");
54MODULE_FIRMWARE("radeon/RV670_me.bin");
55MODULE_FIRMWARE("radeon/RS780_pfp.bin");
56MODULE_FIRMWARE("radeon/RS780_me.bin");
57MODULE_FIRMWARE("radeon/RV770_pfp.bin");
58MODULE_FIRMWARE("radeon/RV770_me.bin");
59MODULE_FIRMWARE("radeon/RV730_pfp.bin");
60MODULE_FIRMWARE("radeon/RV730_me.bin");
61MODULE_FIRMWARE("radeon/RV710_pfp.bin");
62MODULE_FIRMWARE("radeon/RV710_me.bin");
63
64
65int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
66 unsigned family, u32 *ib, int *l);
67void r600_cs_legacy_init(void);
68
69
70# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
71# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
72
73#define R600_PTE_VALID (1 << 0)
74#define R600_PTE_SYSTEM (1 << 1)
75#define R600_PTE_SNOOPED (1 << 2)
76#define R600_PTE_READABLE (1 << 5)
77#define R600_PTE_WRITEABLE (1 << 6)
78
79/* MAX values used for gfx init */
80#define R6XX_MAX_SH_GPRS 256
81#define R6XX_MAX_TEMP_GPRS 16
82#define R6XX_MAX_SH_THREADS 256
83#define R6XX_MAX_SH_STACK_ENTRIES 4096
84#define R6XX_MAX_BACKENDS 8
85#define R6XX_MAX_BACKENDS_MASK 0xff
86#define R6XX_MAX_SIMDS 8
87#define R6XX_MAX_SIMDS_MASK 0xff
88#define R6XX_MAX_PIPES 8
89#define R6XX_MAX_PIPES_MASK 0xff
90
91#define R7XX_MAX_SH_GPRS 256
92#define R7XX_MAX_TEMP_GPRS 16
93#define R7XX_MAX_SH_THREADS 256
94#define R7XX_MAX_SH_STACK_ENTRIES 4096
95#define R7XX_MAX_BACKENDS 8
96#define R7XX_MAX_BACKENDS_MASK 0xff
97#define R7XX_MAX_SIMDS 16
98#define R7XX_MAX_SIMDS_MASK 0xffff
99#define R7XX_MAX_PIPES 8
100#define R7XX_MAX_PIPES_MASK 0xff
101
102static int r600_do_wait_for_fifo(drm_radeon_private_t *dev_priv, int entries)
103{
104 int i;
105
106 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
107
108 for (i = 0; i < dev_priv->usec_timeout; i++) {
109 int slots;
110 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
111 slots = (RADEON_READ(R600_GRBM_STATUS)
112 & R700_CMDFIFO_AVAIL_MASK);
113 else
114 slots = (RADEON_READ(R600_GRBM_STATUS)
115 & R600_CMDFIFO_AVAIL_MASK);
116 if (slots >= entries)
117 return 0;
118 DRM_UDELAY(1);
119 }
120 DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
121 RADEON_READ(R600_GRBM_STATUS),
122 RADEON_READ(R600_GRBM_STATUS2));
123
124 return -EBUSY;
125}
126
127static int r600_do_wait_for_idle(drm_radeon_private_t *dev_priv)
128{
129 int i, ret;
130
131 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
132
133 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
134 ret = r600_do_wait_for_fifo(dev_priv, 8);
135 else
136 ret = r600_do_wait_for_fifo(dev_priv, 16);
137 if (ret)
138 return ret;
139 for (i = 0; i < dev_priv->usec_timeout; i++) {
140 if (!(RADEON_READ(R600_GRBM_STATUS) & R600_GUI_ACTIVE))
141 return 0;
142 DRM_UDELAY(1);
143 }
144 DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
145 RADEON_READ(R600_GRBM_STATUS),
146 RADEON_READ(R600_GRBM_STATUS2));
147
148 return -EBUSY;
149}
150
151void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
152{
153 struct drm_sg_mem *entry = dev->sg;
154 int max_pages;
155 int pages;
156 int i;
157
158 if (!entry)
159 return;
160
161 if (gart_info->bus_addr) {
162 max_pages = (gart_info->table_size / sizeof(u64));
163 pages = (entry->pages <= max_pages)
164 ? entry->pages : max_pages;
165
166 for (i = 0; i < pages; i++) {
167 if (!entry->busaddr[i])
168 break;
169 pci_unmap_page(dev->pdev, entry->busaddr[i],
170 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
171 }
172 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
173 gart_info->bus_addr = 0;
174 }
175}
176
177/* R600 has page table setup */
/*
 * Build the flat R600 GART page table: DMA-map each scatter/gather page
 * and write one 64-bit PTE per 4K GART page into the table mapping.
 *
 * Returns 1 on success, 0 if a page could not be DMA-mapped (in which
 * case all mappings made so far are torn down via
 * r600_page_table_cleanup()).
 */
int r600_page_table_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
	struct drm_local_map *map = &gart_info->mapping;
	struct drm_sg_mem *entry = dev->sg;
	int ret = 0;
	int i, j;
	int pages;
	u64 page_base;
	dma_addr_t entry_addr;
	int max_ati_pages, max_real_pages, gart_idx;

	/* okay page table is available - lets rock */

	/* PTEs are u64; one CPU page may span several 4K GART pages. */
	max_ati_pages = (gart_info->table_size / sizeof(u64));
	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);

	pages = (entry->pages <= max_real_pages) ?
		entry->pages : max_real_pages;

	/* Table may live in I/O memory, hence memset_io rather than memset. */
	memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u64));

	gart_idx = 0;
	for (i = 0; i < pages; i++) {
		entry->busaddr[i] = pci_map_page(dev->pdev,
						 entry->pagelist[i], 0,
						 PAGE_SIZE,
						 PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
			DRM_ERROR("unable to map PCIGART pages!\n");
			r600_page_table_cleanup(dev, gart_info);
			goto done;
		}
		entry_addr = entry->busaddr[i];
		/* Emit one PTE for each 4K GART page inside this CPU page. */
		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
			page_base = (u64) entry_addr & ATI_PCIGART_PAGE_MASK;
			page_base |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
			page_base |= R600_PTE_READABLE | R600_PTE_WRITEABLE;

			DRM_WRITE64(map, gart_idx * sizeof(u64), page_base);

			gart_idx++;

			/* Debug print throttled to every 128th CPU page. */
			if ((i % 128) == 0)
				DRM_DEBUG("page entry %d: 0x%016llx\n",
					  i, (unsigned long long)page_base);
			entry_addr += ATI_PCIGART_PAGE_SIZE;
		}
	}
	ret = 1;
done:
	return ret;
}
231
232static void r600_vm_flush_gart_range(struct drm_device *dev)
233{
234 drm_radeon_private_t *dev_priv = dev->dev_private;
235 u32 resp, countdown = 1000;
236 RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR, dev_priv->gart_vm_start >> 12);
237 RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
238 RADEON_WRITE(R600_VM_CONTEXT0_REQUEST_RESPONSE, 2);
239
240 do {
241 resp = RADEON_READ(R600_VM_CONTEXT0_REQUEST_RESPONSE);
242 countdown--;
243 DRM_UDELAY(1);
244 } while (((resp & 0xf0) == 0) && countdown);
245}
246
/*
 * Program the R6xx MC/VM registers so that VM context 0 walks the flat
 * page table built by r600_page_table_init(), then flush the GART TLB.
 * Register write order follows the hardware bring-up sequence; do not
 * reorder.
 */
static void r600_vm_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	/* initialise the VM to use the page table we constructed up there */
	u32 vm_c0, i;
	u32 mc_rd_a;
	u32 vm_l2_cntl, vm_l2_cntl3;
	/* okay set up the PCIE aperture type thingo */
	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);

	/* setup MC RD a */
	mc_rd_a = R600_MCD_L1_TLB | R600_MCD_L1_FRAG_PROC | R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS |
		R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | R600_MCD_EFFECTIVE_L1_TLB_SIZE(5) |
		R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(5) | R600_MCD_WAIT_L2_QUERY;

	/* Apply the same L1 TLB settings to every MC client. */
	RADEON_WRITE(R600_MCD_RD_A_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_RD_B_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_WR_A_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_B_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_RD_GFX_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_GFX_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_RD_SYS_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_SYS_CNTL, mc_rd_a);

	/* HDP reads additionally require strict ordering (writes do not). */
	RADEON_WRITE(R600_MCD_RD_HDP_CNTL, mc_rd_a | R600_MCD_L1_STRICT_ORDERING);
	RADEON_WRITE(R600_MCD_WR_HDP_CNTL, mc_rd_a /*| R600_MCD_L1_STRICT_ORDERING*/);

	RADEON_WRITE(R600_MCD_RD_PDMA_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_PDMA_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_RD_SEM_CNTL, mc_rd_a | R600_MCD_SEMAPHORE_MODE);
	RADEON_WRITE(R600_MCD_WR_SEM_CNTL, mc_rd_a);

	/* Enable the L2 PTE cache. */
	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
	vm_l2_cntl |= R600_VM_L2_CNTL_QUEUE_SIZE(7);
	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);

	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
	vm_l2_cntl3 = (R600_VM_L2_CNTL3_BANK_SELECT_0(0) |
		       R600_VM_L2_CNTL3_BANK_SELECT_1(1) |
		       R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(2));
	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);

	/* Context 0 is enabled with a flat (single-level) page table. */
	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;

	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);

	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);

	/* Point context 0 at the page table and its address range (4K units). */
	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);

	r600_vm_flush_gart_range(dev);
}
311
/*
 * Load the PFP and ME (PM4) command-processor microcode images for the
 * detected ASIC family via request_firmware() and validate their sizes.
 *
 * Returns 0 on success or a negative errno. On any failure both
 * firmware handles are released and reset to NULL, so callers can
 * simply check the return value.
 */
static int r600_cp_init_microcode(drm_radeon_private_t *dev_priv)
{
	struct platform_device *pdev;
	const char *chip_name;
	size_t pfp_req_size, me_req_size;
	char fw_name[30];
	int err;

	/* A throwaway platform device gives request_firmware() a parent. */
	pdev = platform_device_register_simple("r600_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "r600_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	/* Map the ASIC family to its firmware file prefix; some families
	 * share images (RS880 uses RS780's, RV740 uses RV730's). */
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_R600:  chip_name = "R600";  break;
	case CHIP_RV610: chip_name = "RV610"; break;
	case CHIP_RV630: chip_name = "RV630"; break;
	case CHIP_RV620: chip_name = "RV620"; break;
	case CHIP_RV635: chip_name = "RV635"; break;
	case CHIP_RV670: chip_name = "RV670"; break;
	case CHIP_RS780:
	case CHIP_RS880: chip_name = "RS780"; break;
	case CHIP_RV770: chip_name = "RV770"; break;
	case CHIP_RV730:
	case CHIP_RV740: chip_name = "RV730"; break;
	case CHIP_RV710: chip_name = "RV710"; break;
	default:         BUG();
	}

	/* Expected sizes in bytes; the R6xx ME image holds 3 dwords per
	 * PM4 instruction (hence *12 here and the *3 in the load loop). */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
	}

	DRM_INFO("Loading %s CP Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&dev_priv->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (dev_priv->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       dev_priv->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (dev_priv->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       dev_priv->me_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	/* The helper device is only needed while requesting firmware. */
	platform_device_unregister(pdev);

	if (err) {
		/* -EINVAL paths already printed a size-specific message. */
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		/* release_firmware(NULL) is a no-op, so this is safe even
		 * if only the first image was fetched. */
		release_firmware(dev_priv->pfp_fw);
		dev_priv->pfp_fw = NULL;
		release_firmware(dev_priv->me_fw);
		dev_priv->me_fw = NULL;
	}
	return err;
}
390
/*
 * Upload the previously fetched PFP and ME microcode into the R6xx
 * command processor. The CP is halted and soft-reset first; the write
 * sequence (address register, then a stream of data writes) is the
 * hardware-mandated upload protocol, so statement order matters.
 * No-op if either firmware image is missing.
 */
static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
{
	const __be32 *fw_data;
	int i;

	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
		return;

	r600_do_cp_stop(dev_priv);

	RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
		     R600_BUF_SWAP_32BIT |
#endif
		     R600_RB_NO_UPDATE |
		     R600_RB_BLKSZ(15) |
		     R600_RB_BUFSZ(3));

	/* Soft-reset the CP and give it time to settle before the upload. */
	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
	RADEON_READ(R600_GRBM_SOFT_RESET);
	mdelay(15);
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);

	/* ME (PM4) ucode: 3 dwords per instruction on R6xx, hence *3.
	 * Firmware is stored big-endian; convert each dword on write. */
	fw_data = (const __be32 *)dev_priv->me_fw->data;
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		RADEON_WRITE(R600_CP_ME_RAM_DATA,
			     be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		RADEON_WRITE(R600_CP_PFP_UCODE_DATA,
			     be32_to_cpup(fw_data++));

	/* Rewind the ucode address pointers after the upload. */
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);

}
431
/*
 * RV770+ variant of r600_vm_init(): program the R7xx MC/VM registers so
 * that VM context 0 walks the flat page table, then flush the GART TLB.
 * The R7xx parts use per-TLB control registers instead of the R6xx
 * per-client MCD registers.
 */
static void r700_vm_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	/* initialise the VM to use the page table we constructed up there */
	u32 vm_c0, i;
	u32 mc_vm_md_l1;
	u32 vm_l2_cntl, vm_l2_cntl3;
	/* okay set up the PCIE aperture type thingo */
	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);

	mc_vm_md_l1 = R700_ENABLE_L1_TLB |
	    R700_ENABLE_L1_FRAGMENT_PROCESSING |
	    R700_SYSTEM_ACCESS_MODE_IN_SYS |
	    R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
	    R700_EFFECTIVE_L1_TLB_SIZE(5) |
	    R700_EFFECTIVE_L1_QUEUE_SIZE(5);

	/* Same L1 TLB configuration for all MD and MB TLB instances. */
	RADEON_WRITE(R700_MC_VM_MD_L1_TLB0_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MD_L1_TLB1_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MD_L1_TLB2_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB0_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB1_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB2_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB3_CNTL, mc_vm_md_l1);

	/* Enable the L2 PTE cache. */
	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
	vm_l2_cntl |= R700_VM_L2_CNTL_QUEUE_SIZE(7);
	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);

	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
	vm_l2_cntl3 = R700_VM_L2_CNTL3_BANK_SELECT(0) | R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(2);
	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);

	/* Context 0 is enabled with a flat (single-level) page table. */
	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;

	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);

	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);

	/* Point context 0 at the page table and its address range (4K units). */
	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);

	r600_vm_flush_gart_range(dev);
}
483
/*
 * RV770+ variant of r600_cp_load_microcode(): upload the PFP and ME
 * microcode into the command processor after halting and soft-resetting
 * it. Unlike R6xx, the R7xx ME image is one dword per entry (no *3).
 * Statement order follows the hardware upload protocol; do not reorder.
 * No-op if either firmware image is missing.
 */
static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
{
	const __be32 *fw_data;
	int i;

	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
		return;

	r600_do_cp_stop(dev_priv);

	RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
		     R600_BUF_SWAP_32BIT |
#endif
		     R600_RB_NO_UPDATE |
		     R600_RB_BLKSZ(15) |
		     R600_RB_BUFSZ(3));

	/* Soft-reset the CP and give it time to settle before the upload. */
	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
	RADEON_READ(R600_GRBM_SOFT_RESET);
	mdelay(15);
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);

	/* Firmware is stored big-endian; convert each dword on write. */
	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
		RADEON_WRITE(R600_CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)dev_priv->me_fw->data;
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
		RADEON_WRITE(R600_CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);

	/* Rewind the ucode address pointers after the upload. */
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);

}
524
/*
 * Probe whether scratch-register writeback to system memory works on
 * this board: write a magic value to a scratch register and poll the
 * writeback location for it. Sets dev_priv->writeback_works
 * accordingly; if writeback is unusable (or forced off via the
 * radeon_no_wb module option), disables it in hardware to avoid
 * pointless bus-master traffic.
 */
static void r600_test_writeback(drm_radeon_private_t *dev_priv)
{
	u32 tmp;

	/* Start with assuming that writeback doesn't work */
	dev_priv->writeback_works = 0;

	/* Writeback doesn't seem to work everywhere, test it here and possibly
	 * enable it if it appears to work
	 */
	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);

	RADEON_WRITE(R600_SCRATCH_REG1, 0xdeadbeef);

	/* Poll the writeback slot until the magic value lands or we time out. */
	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
		u32 val;

		val = radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(1));
		if (val == 0xdeadbeef)
			break;
		DRM_UDELAY(1);
	}

	if (tmp < dev_priv->usec_timeout) {
		dev_priv->writeback_works = 1;
		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
	} else {
		dev_priv->writeback_works = 0;
		DRM_INFO("writeback test failed\n");
	}
	/* Module option overrides a successful test. */
	if (radeon_no_wb == 1) {
		dev_priv->writeback_works = 0;
		DRM_INFO("writeback forced off\n");
	}

	if (!dev_priv->writeback_works) {
		/* Disable writeback to avoid unnecessary bus master transfer */
		RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
			     R600_BUF_SWAP_32BIT |
#endif
			     RADEON_READ(R600_CP_RB_CNTL) |
			     R600_RB_NO_UPDATE);
		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
	}
}
571
/*
 * Soft-reset the GPU's graphics blocks after a lockup: halt the CP,
 * pulse GRBM_SOFT_RESET, restore the ring pointers and CP state, then
 * reset the CP ring and driver bookkeeping. The save/reset/restore
 * ordering is required by the hardware reset sequence.
 *
 * Always returns 0.
 */
int r600_do_engine_reset(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 cp_ptr, cp_me_cntl, cp_rb_cntl;

	DRM_INFO("Resetting GPU\n");

	/* Save ring pointer and ME state, then halt the micro engine. */
	cp_ptr = RADEON_READ(R600_CP_RB_WPTR);
	cp_me_cntl = RADEON_READ(R600_CP_ME_CNTL);
	RADEON_WRITE(R600_CP_ME_CNTL, R600_CP_ME_HALT);

	/* Pulse a soft reset of all GRBM blocks; the read flushes the write. */
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0x7fff);
	RADEON_READ(R600_GRBM_SOFT_RESET);
	DRM_UDELAY(50);
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
	RADEON_READ(R600_GRBM_SOFT_RESET);

	/* Re-seed both ring pointers with the saved write pointer. */
	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
	RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
		     R600_BUF_SWAP_32BIT |
#endif
		     R600_RB_RPTR_WR_ENA);

	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
	RADEON_WRITE(R600_CP_RB_CNTL, cp_rb_cntl);
	RADEON_WRITE(R600_CP_ME_CNTL, cp_me_cntl);

	/* Reset the CP ring */
	r600_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	/* Reset any pending vertex, indirect buffers */
	radeon_freelist_reset(dev);

	return 0;

}
614
615static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
616 u32 num_backends,
617 u32 backend_disable_mask)
618{
619 u32 backend_map = 0;
620 u32 enabled_backends_mask;
621 u32 enabled_backends_count;
622 u32 cur_pipe;
623 u32 swizzle_pipe[R6XX_MAX_PIPES];
624 u32 cur_backend;
625 u32 i;
626
627 if (num_tile_pipes > R6XX_MAX_PIPES)
628 num_tile_pipes = R6XX_MAX_PIPES;
629 if (num_tile_pipes < 1)
630 num_tile_pipes = 1;
631 if (num_backends > R6XX_MAX_BACKENDS)
632 num_backends = R6XX_MAX_BACKENDS;
633 if (num_backends < 1)
634 num_backends = 1;
635
636 enabled_backends_mask = 0;
637 enabled_backends_count = 0;
638 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
639 if (((backend_disable_mask >> i) & 1) == 0) {
640 enabled_backends_mask |= (1 << i);
641 ++enabled_backends_count;
642 }
643 if (enabled_backends_count == num_backends)
644 break;
645 }
646
647 if (enabled_backends_count == 0) {
648 enabled_backends_mask = 1;
649 enabled_backends_count = 1;
650 }
651
652 if (enabled_backends_count != num_backends)
653 num_backends = enabled_backends_count;
654
655 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
656 switch (num_tile_pipes) {
657 case 1:
658 swizzle_pipe[0] = 0;
659 break;
660 case 2:
661 swizzle_pipe[0] = 0;
662 swizzle_pipe[1] = 1;
663 break;
664 case 3:
665 swizzle_pipe[0] = 0;
666 swizzle_pipe[1] = 1;
667 swizzle_pipe[2] = 2;
668 break;
669 case 4:
670 swizzle_pipe[0] = 0;
671 swizzle_pipe[1] = 1;
672 swizzle_pipe[2] = 2;
673 swizzle_pipe[3] = 3;
674 break;
675 case 5:
676 swizzle_pipe[0] = 0;
677 swizzle_pipe[1] = 1;
678 swizzle_pipe[2] = 2;
679 swizzle_pipe[3] = 3;
680 swizzle_pipe[4] = 4;
681 break;
682 case 6:
683 swizzle_pipe[0] = 0;
684 swizzle_pipe[1] = 2;
685 swizzle_pipe[2] = 4;
686 swizzle_pipe[3] = 5;
687 swizzle_pipe[4] = 1;
688 swizzle_pipe[5] = 3;
689 break;
690 case 7:
691 swizzle_pipe[0] = 0;
692 swizzle_pipe[1] = 2;
693 swizzle_pipe[2] = 4;
694 swizzle_pipe[3] = 6;
695 swizzle_pipe[4] = 1;
696 swizzle_pipe[5] = 3;
697 swizzle_pipe[6] = 5;
698 break;
699 case 8:
700 swizzle_pipe[0] = 0;
701 swizzle_pipe[1] = 2;
702 swizzle_pipe[2] = 4;
703 swizzle_pipe[3] = 6;
704 swizzle_pipe[4] = 1;
705 swizzle_pipe[5] = 3;
706 swizzle_pipe[6] = 5;
707 swizzle_pipe[7] = 7;
708 break;
709 }
710
711 cur_backend = 0;
712 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
713 while (((1 << cur_backend) & enabled_backends_mask) == 0)
714 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
715
716 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
717
718 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
719 }
720
721 return backend_map;
722}
723
/* Population count: number of set bits in @val (used to count the
 * pipes/backends left enabled in a hardware config mask). */
static int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}
728
729static void r600_gfx_init(struct drm_device *dev,
730 drm_radeon_private_t *dev_priv)
731{
732 int i, j, num_qd_pipes;
733 u32 sx_debug_1;
734 u32 tc_cntl;
735 u32 arb_pop;
736 u32 num_gs_verts_per_thread;
737 u32 vgt_gs_per_es;
738 u32 gs_prim_buffer_depth = 0;
739 u32 sq_ms_fifo_sizes;
740 u32 sq_config;
741 u32 sq_gpr_resource_mgmt_1 = 0;
742 u32 sq_gpr_resource_mgmt_2 = 0;
743 u32 sq_thread_resource_mgmt = 0;
744 u32 sq_stack_resource_mgmt_1 = 0;
745 u32 sq_stack_resource_mgmt_2 = 0;
746 u32 hdp_host_path_cntl;
747 u32 backend_map;
748 u32 gb_tiling_config = 0;
749 u32 cc_rb_backend_disable;
750 u32 cc_gc_shader_pipe_config;
751 u32 ramcfg;
752
753 /* setup chip specs */
754 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
755 case CHIP_R600:
756 dev_priv->r600_max_pipes = 4;
757 dev_priv->r600_max_tile_pipes = 8;
758 dev_priv->r600_max_simds = 4;
759 dev_priv->r600_max_backends = 4;
760 dev_priv->r600_max_gprs = 256;
761 dev_priv->r600_max_threads = 192;
762 dev_priv->r600_max_stack_entries = 256;
763 dev_priv->r600_max_hw_contexts = 8;
764 dev_priv->r600_max_gs_threads = 16;
765 dev_priv->r600_sx_max_export_size = 128;
766 dev_priv->r600_sx_max_export_pos_size = 16;
767 dev_priv->r600_sx_max_export_smx_size = 128;
768 dev_priv->r600_sq_num_cf_insts = 2;
769 break;
770 case CHIP_RV630:
771 case CHIP_RV635:
772 dev_priv->r600_max_pipes = 2;
773 dev_priv->r600_max_tile_pipes = 2;
774 dev_priv->r600_max_simds = 3;
775 dev_priv->r600_max_backends = 1;
776 dev_priv->r600_max_gprs = 128;
777 dev_priv->r600_max_threads = 192;
778 dev_priv->r600_max_stack_entries = 128;
779 dev_priv->r600_max_hw_contexts = 8;
780 dev_priv->r600_max_gs_threads = 4;
781 dev_priv->r600_sx_max_export_size = 128;
782 dev_priv->r600_sx_max_export_pos_size = 16;
783 dev_priv->r600_sx_max_export_smx_size = 128;
784 dev_priv->r600_sq_num_cf_insts = 2;
785 break;
786 case CHIP_RV610:
787 case CHIP_RS780:
788 case CHIP_RS880:
789 case CHIP_RV620:
790 dev_priv->r600_max_pipes = 1;
791 dev_priv->r600_max_tile_pipes = 1;
792 dev_priv->r600_max_simds = 2;
793 dev_priv->r600_max_backends = 1;
794 dev_priv->r600_max_gprs = 128;
795 dev_priv->r600_max_threads = 192;
796 dev_priv->r600_max_stack_entries = 128;
797 dev_priv->r600_max_hw_contexts = 4;
798 dev_priv->r600_max_gs_threads = 4;
799 dev_priv->r600_sx_max_export_size = 128;
800 dev_priv->r600_sx_max_export_pos_size = 16;
801 dev_priv->r600_sx_max_export_smx_size = 128;
802 dev_priv->r600_sq_num_cf_insts = 1;
803 break;
804 case CHIP_RV670:
805 dev_priv->r600_max_pipes = 4;
806 dev_priv->r600_max_tile_pipes = 4;
807 dev_priv->r600_max_simds = 4;
808 dev_priv->r600_max_backends = 4;
809 dev_priv->r600_max_gprs = 192;
810 dev_priv->r600_max_threads = 192;
811 dev_priv->r600_max_stack_entries = 256;
812 dev_priv->r600_max_hw_contexts = 8;
813 dev_priv->r600_max_gs_threads = 16;
814 dev_priv->r600_sx_max_export_size = 128;
815 dev_priv->r600_sx_max_export_pos_size = 16;
816 dev_priv->r600_sx_max_export_smx_size = 128;
817 dev_priv->r600_sq_num_cf_insts = 2;
818 break;
819 default:
820 break;
821 }
822
823 /* Initialize HDP */
824 j = 0;
825 for (i = 0; i < 32; i++) {
826 RADEON_WRITE((0x2c14 + j), 0x00000000);
827 RADEON_WRITE((0x2c18 + j), 0x00000000);
828 RADEON_WRITE((0x2c1c + j), 0x00000000);
829 RADEON_WRITE((0x2c20 + j), 0x00000000);
830 RADEON_WRITE((0x2c24 + j), 0x00000000);
831 j += 0x18;
832 }
833
834 RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
835
836 /* setup tiling, simd, pipe config */
837 ramcfg = RADEON_READ(R600_RAMCFG);
838
839 switch (dev_priv->r600_max_tile_pipes) {
840 case 1:
841 gb_tiling_config |= R600_PIPE_TILING(0);
842 break;
843 case 2:
844 gb_tiling_config |= R600_PIPE_TILING(1);
845 break;
846 case 4:
847 gb_tiling_config |= R600_PIPE_TILING(2);
848 break;
849 case 8:
850 gb_tiling_config |= R600_PIPE_TILING(3);
851 break;
852 default:
853 break;
854 }
855
856 gb_tiling_config |= R600_BANK_TILING((ramcfg >> R600_NOOFBANK_SHIFT) & R600_NOOFBANK_MASK);
857
858 gb_tiling_config |= R600_GROUP_SIZE(0);
859
860 if (((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK) > 3) {
861 gb_tiling_config |= R600_ROW_TILING(3);
862 gb_tiling_config |= R600_SAMPLE_SPLIT(3);
863 } else {
864 gb_tiling_config |=
865 R600_ROW_TILING(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
866 gb_tiling_config |=
867 R600_SAMPLE_SPLIT(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
868 }
869
870 gb_tiling_config |= R600_BANK_SWAPS(1);
871
872 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
873 cc_rb_backend_disable |=
874 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
875
876 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
877 cc_gc_shader_pipe_config |=
878 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
879 cc_gc_shader_pipe_config |=
880 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
881
882 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
883 (R6XX_MAX_BACKENDS -
884 r600_count_pipe_bits((cc_rb_backend_disable &
885 R6XX_MAX_BACKENDS_MASK) >> 16)),
886 (cc_rb_backend_disable >> 16));
887 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
888
889 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
890 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
891 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
892 if (gb_tiling_config & 0xc0) {
893 dev_priv->r600_group_size = 512;
894 } else {
895 dev_priv->r600_group_size = 256;
896 }
897 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
898 if (gb_tiling_config & 0x30) {
899 dev_priv->r600_nbanks = 8;
900 } else {
901 dev_priv->r600_nbanks = 4;
902 }
903
904 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
905 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
906 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
907
908 num_qd_pipes =
909 R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
910 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
911 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
912
913 /* set HW defaults for 3D engine */
914 RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
915 R600_ROQ_IB2_START(0x2b)));
916
917 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, (R600_MEQ_END(0x40) |
918 R600_ROQ_END(0x40)));
919
920 RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
921 R600_SYNC_GRADIENT |
922 R600_SYNC_WALKER |
923 R600_SYNC_ALIGNER));
924
925 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670)
926 RADEON_WRITE(R600_ARB_GDEC_RD_CNTL, 0x00000021);
927
928 sx_debug_1 = RADEON_READ(R600_SX_DEBUG_1);
929 sx_debug_1 |= R600_SMX_EVENT_RELEASE;
930 if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600))
931 sx_debug_1 |= R600_ENABLE_NEW_SMX_ADDRESS;
932 RADEON_WRITE(R600_SX_DEBUG_1, sx_debug_1);
933
934 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
935 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
936 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
937 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
938 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
939 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
940 RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE);
941 else
942 RADEON_WRITE(R600_DB_DEBUG, 0);
943
944 RADEON_WRITE(R600_DB_WATERMARKS, (R600_DEPTH_FREE(4) |
945 R600_DEPTH_FLUSH(16) |
946 R600_DEPTH_PENDING_FREE(4) |
947 R600_DEPTH_CACHELINE_FREE(16)));
948 RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
949 RADEON_WRITE(R600_VGT_NUM_INSTANCES, 0);
950
951 RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
952 RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(0));
953
954 sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES);
955 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
956 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
957 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
958 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
959 sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) |
960 R600_FETCH_FIFO_HIWATER(0xa) |
961 R600_DONE_FIFO_HIWATER(0xe0) |
962 R600_ALU_UPDATE_FIFO_HIWATER(0x8));
963 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
964 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630)) {
965 sq_ms_fifo_sizes &= ~R600_DONE_FIFO_HIWATER(0xff);
966 sq_ms_fifo_sizes |= R600_DONE_FIFO_HIWATER(0x4);
967 }
968 RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
969
970 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
971 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
972 */
973 sq_config = RADEON_READ(R600_SQ_CONFIG);
974 sq_config &= ~(R600_PS_PRIO(3) |
975 R600_VS_PRIO(3) |
976 R600_GS_PRIO(3) |
977 R600_ES_PRIO(3));
978 sq_config |= (R600_DX9_CONSTS |
979 R600_VC_ENABLE |
980 R600_PS_PRIO(0) |
981 R600_VS_PRIO(1) |
982 R600_GS_PRIO(2) |
983 R600_ES_PRIO(3));
984
985 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) {
986 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(124) |
987 R600_NUM_VS_GPRS(124) |
988 R600_NUM_CLAUSE_TEMP_GPRS(4));
989 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(0) |
990 R600_NUM_ES_GPRS(0));
991 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(136) |
992 R600_NUM_VS_THREADS(48) |
993 R600_NUM_GS_THREADS(4) |
994 R600_NUM_ES_THREADS(4));
995 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(128) |
996 R600_NUM_VS_STACK_ENTRIES(128));
997 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(0) |
998 R600_NUM_ES_STACK_ENTRIES(0));
999 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
1000 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
1001 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
1002 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
1003 /* no vertex cache */
1004 sq_config &= ~R600_VC_ENABLE;
1005
1006 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
1007 R600_NUM_VS_GPRS(44) |
1008 R600_NUM_CLAUSE_TEMP_GPRS(2));
1009 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
1010 R600_NUM_ES_GPRS(17));
1011 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
1012 R600_NUM_VS_THREADS(78) |
1013 R600_NUM_GS_THREADS(4) |
1014 R600_NUM_ES_THREADS(31));
1015 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
1016 R600_NUM_VS_STACK_ENTRIES(40));
1017 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
1018 R600_NUM_ES_STACK_ENTRIES(16));
1019 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
1020 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV635)) {
1021 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
1022 R600_NUM_VS_GPRS(44) |
1023 R600_NUM_CLAUSE_TEMP_GPRS(2));
1024 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(18) |
1025 R600_NUM_ES_GPRS(18));
1026 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
1027 R600_NUM_VS_THREADS(78) |
1028 R600_NUM_GS_THREADS(4) |
1029 R600_NUM_ES_THREADS(31));
1030 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
1031 R600_NUM_VS_STACK_ENTRIES(40));
1032 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
1033 R600_NUM_ES_STACK_ENTRIES(16));
1034 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670) {
1035 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
1036 R600_NUM_VS_GPRS(44) |
1037 R600_NUM_CLAUSE_TEMP_GPRS(2));
1038 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
1039 R600_NUM_ES_GPRS(17));
1040 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
1041 R600_NUM_VS_THREADS(78) |
1042 R600_NUM_GS_THREADS(4) |
1043 R600_NUM_ES_THREADS(31));
1044 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(64) |
1045 R600_NUM_VS_STACK_ENTRIES(64));
1046 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(64) |
1047 R600_NUM_ES_STACK_ENTRIES(64));
1048 }
1049
1050 RADEON_WRITE(R600_SQ_CONFIG, sq_config);
1051 RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1052 RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1053 RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1054 RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1055 RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1056
1057 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
1058 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
1059 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
1060 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
1061 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY));
1062 else
1063 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC));
1064
1065 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_2S, (R600_S0_X(0xc) |
1066 R600_S0_Y(0x4) |
1067 R600_S1_X(0x4) |
1068 R600_S1_Y(0xc)));
1069 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_4S, (R600_S0_X(0xe) |
1070 R600_S0_Y(0xe) |
1071 R600_S1_X(0x2) |
1072 R600_S1_Y(0x2) |
1073 R600_S2_X(0xa) |
1074 R600_S2_Y(0x6) |
1075 R600_S3_X(0x6) |
1076 R600_S3_Y(0xa)));
1077 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0, (R600_S0_X(0xe) |
1078 R600_S0_Y(0xb) |
1079 R600_S1_X(0x4) |
1080 R600_S1_Y(0xc) |
1081 R600_S2_X(0x1) |
1082 R600_S2_Y(0x6) |
1083 R600_S3_X(0xa) |
1084 R600_S3_Y(0xe)));
1085 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1, (R600_S4_X(0x6) |
1086 R600_S4_Y(0x1) |
1087 R600_S5_X(0x0) |
1088 R600_S5_Y(0x0) |
1089 R600_S6_X(0xb) |
1090 R600_S6_Y(0x4) |
1091 R600_S7_X(0x7) |
1092 R600_S7_Y(0x8)));
1093
1094
1095 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1096 case CHIP_R600:
1097 case CHIP_RV630:
1098 case CHIP_RV635:
1099 gs_prim_buffer_depth = 0;
1100 break;
1101 case CHIP_RV610:
1102 case CHIP_RS780:
1103 case CHIP_RS880:
1104 case CHIP_RV620:
1105 gs_prim_buffer_depth = 32;
1106 break;
1107 case CHIP_RV670:
1108 gs_prim_buffer_depth = 128;
1109 break;
1110 default:
1111 break;
1112 }
1113
1114 num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
1115 vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
1116 /* Max value for this is 256 */
1117 if (vgt_gs_per_es > 256)
1118 vgt_gs_per_es = 256;
1119
1120 RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
1121 RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
1122 RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
1123 RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
1124
1125 /* more default values. 2D/3D driver should adjust as needed */
1126 RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
1127 RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
1128 RADEON_WRITE(R600_SX_MISC, 0);
1129 RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
1130 RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
1131 RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
1132 RADEON_WRITE(R600_SPI_INPUT_Z, 0);
1133 RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
1134 RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
1135
1136 /* clear render buffer base addresses */
1137 RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
1138 RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
1139 RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
1140 RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
1141 RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
1142 RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
1143 RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
1144 RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
1145
1146 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1147 case CHIP_RV610:
1148 case CHIP_RS780:
1149 case CHIP_RS880:
1150 case CHIP_RV620:
1151 tc_cntl = R600_TC_L2_SIZE(8);
1152 break;
1153 case CHIP_RV630:
1154 case CHIP_RV635:
1155 tc_cntl = R600_TC_L2_SIZE(4);
1156 break;
1157 case CHIP_R600:
1158 tc_cntl = R600_TC_L2_SIZE(0) | R600_L2_DISABLE_LATE_HIT;
1159 break;
1160 default:
1161 tc_cntl = R600_TC_L2_SIZE(0);
1162 break;
1163 }
1164
1165 RADEON_WRITE(R600_TC_CNTL, tc_cntl);
1166
1167 hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
1168 RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1169
1170 arb_pop = RADEON_READ(R600_ARB_POP);
1171 arb_pop |= R600_ENABLE_TC128;
1172 RADEON_WRITE(R600_ARB_POP, arb_pop);
1173
1174 RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
1175 RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
1176 R600_NUM_CLIP_SEQ(3)));
1177 RADEON_WRITE(R600_PA_SC_ENHANCE, R600_FORCE_EOV_MAX_CLK_CNT(4095));
1178
1179}
1180
1181static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
1182 u32 num_tile_pipes,
1183 u32 num_backends,
1184 u32 backend_disable_mask)
1185{
1186 u32 backend_map = 0;
1187 u32 enabled_backends_mask;
1188 u32 enabled_backends_count;
1189 u32 cur_pipe;
1190 u32 swizzle_pipe[R7XX_MAX_PIPES];
1191 u32 cur_backend;
1192 u32 i;
1193 bool force_no_swizzle;
1194
1195 if (num_tile_pipes > R7XX_MAX_PIPES)
1196 num_tile_pipes = R7XX_MAX_PIPES;
1197 if (num_tile_pipes < 1)
1198 num_tile_pipes = 1;
1199 if (num_backends > R7XX_MAX_BACKENDS)
1200 num_backends = R7XX_MAX_BACKENDS;
1201 if (num_backends < 1)
1202 num_backends = 1;
1203
1204 enabled_backends_mask = 0;
1205 enabled_backends_count = 0;
1206 for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
1207 if (((backend_disable_mask >> i) & 1) == 0) {
1208 enabled_backends_mask |= (1 << i);
1209 ++enabled_backends_count;
1210 }
1211 if (enabled_backends_count == num_backends)
1212 break;
1213 }
1214
1215 if (enabled_backends_count == 0) {
1216 enabled_backends_mask = 1;
1217 enabled_backends_count = 1;
1218 }
1219
1220 if (enabled_backends_count != num_backends)
1221 num_backends = enabled_backends_count;
1222
1223 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1224 case CHIP_RV770:
1225 case CHIP_RV730:
1226 force_no_swizzle = false;
1227 break;
1228 case CHIP_RV710:
1229 case CHIP_RV740:
1230 default:
1231 force_no_swizzle = true;
1232 break;
1233 }
1234
1235 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
1236 switch (num_tile_pipes) {
1237 case 1:
1238 swizzle_pipe[0] = 0;
1239 break;
1240 case 2:
1241 swizzle_pipe[0] = 0;
1242 swizzle_pipe[1] = 1;
1243 break;
1244 case 3:
1245 if (force_no_swizzle) {
1246 swizzle_pipe[0] = 0;
1247 swizzle_pipe[1] = 1;
1248 swizzle_pipe[2] = 2;
1249 } else {
1250 swizzle_pipe[0] = 0;
1251 swizzle_pipe[1] = 2;
1252 swizzle_pipe[2] = 1;
1253 }
1254 break;
1255 case 4:
1256 if (force_no_swizzle) {
1257 swizzle_pipe[0] = 0;
1258 swizzle_pipe[1] = 1;
1259 swizzle_pipe[2] = 2;
1260 swizzle_pipe[3] = 3;
1261 } else {
1262 swizzle_pipe[0] = 0;
1263 swizzle_pipe[1] = 2;
1264 swizzle_pipe[2] = 3;
1265 swizzle_pipe[3] = 1;
1266 }
1267 break;
1268 case 5:
1269 if (force_no_swizzle) {
1270 swizzle_pipe[0] = 0;
1271 swizzle_pipe[1] = 1;
1272 swizzle_pipe[2] = 2;
1273 swizzle_pipe[3] = 3;
1274 swizzle_pipe[4] = 4;
1275 } else {
1276 swizzle_pipe[0] = 0;
1277 swizzle_pipe[1] = 2;
1278 swizzle_pipe[2] = 4;
1279 swizzle_pipe[3] = 1;
1280 swizzle_pipe[4] = 3;
1281 }
1282 break;
1283 case 6:
1284 if (force_no_swizzle) {
1285 swizzle_pipe[0] = 0;
1286 swizzle_pipe[1] = 1;
1287 swizzle_pipe[2] = 2;
1288 swizzle_pipe[3] = 3;
1289 swizzle_pipe[4] = 4;
1290 swizzle_pipe[5] = 5;
1291 } else {
1292 swizzle_pipe[0] = 0;
1293 swizzle_pipe[1] = 2;
1294 swizzle_pipe[2] = 4;
1295 swizzle_pipe[3] = 5;
1296 swizzle_pipe[4] = 3;
1297 swizzle_pipe[5] = 1;
1298 }
1299 break;
1300 case 7:
1301 if (force_no_swizzle) {
1302 swizzle_pipe[0] = 0;
1303 swizzle_pipe[1] = 1;
1304 swizzle_pipe[2] = 2;
1305 swizzle_pipe[3] = 3;
1306 swizzle_pipe[4] = 4;
1307 swizzle_pipe[5] = 5;
1308 swizzle_pipe[6] = 6;
1309 } else {
1310 swizzle_pipe[0] = 0;
1311 swizzle_pipe[1] = 2;
1312 swizzle_pipe[2] = 4;
1313 swizzle_pipe[3] = 6;
1314 swizzle_pipe[4] = 3;
1315 swizzle_pipe[5] = 1;
1316 swizzle_pipe[6] = 5;
1317 }
1318 break;
1319 case 8:
1320 if (force_no_swizzle) {
1321 swizzle_pipe[0] = 0;
1322 swizzle_pipe[1] = 1;
1323 swizzle_pipe[2] = 2;
1324 swizzle_pipe[3] = 3;
1325 swizzle_pipe[4] = 4;
1326 swizzle_pipe[5] = 5;
1327 swizzle_pipe[6] = 6;
1328 swizzle_pipe[7] = 7;
1329 } else {
1330 swizzle_pipe[0] = 0;
1331 swizzle_pipe[1] = 2;
1332 swizzle_pipe[2] = 4;
1333 swizzle_pipe[3] = 6;
1334 swizzle_pipe[4] = 3;
1335 swizzle_pipe[5] = 1;
1336 swizzle_pipe[6] = 7;
1337 swizzle_pipe[7] = 5;
1338 }
1339 break;
1340 }
1341
1342 cur_backend = 0;
1343 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1344 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1345 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
1346
1347 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1348
1349 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
1350 }
1351
1352 return backend_map;
1353}
1354
/*
 * One-time graphics-engine setup for R7xx-family ASICs
 * (RV710 / RV730 / RV740 / RV770).
 *
 * Fills in the per-chip shader/pipe limits in @dev_priv, programs the
 * tiling and render-backend configuration, and loads default values
 * into the shader/rasterizer register blocks.  Several of these writes
 * are explicitly documented below as defaults the 2D/3D client drivers
 * are expected to override later.
 */
static void r700_gfx_init(struct drm_device *dev,
			  drm_radeon_private_t *dev_priv)
{
	int i, j, num_qd_pipes;
	u32 ta_aux_cntl;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 db_debug3;
	u32 num_gs_verts_per_thread;
	u32 vgt_gs_per_es;
	u32 gs_prim_buffer_depth = 0;
	u32 sq_ms_fifo_sizes;
	u32 sq_config;
	u32 sq_thread_resource_mgmt;
	u32 hdp_host_path_cntl;
	u32 sq_dyn_gpr_size_simd_ab_0;
	u32 backend_map;
	u32 gb_tiling_config = 0;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 mc_arb_ramcfg;
	u32 db_debug4;

	/* setup chip specs: per-ASIC pipe/SIMD/backend counts, GPR and
	 * thread limits, and SX/SC FIFO sizes used by the register
	 * programming further down. */
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_RV770:
		dev_priv->r600_max_pipes = 4;
		dev_priv->r600_max_tile_pipes = 8;
		dev_priv->r600_max_simds = 10;
		dev_priv->r600_max_backends = 4;
		dev_priv->r600_max_gprs = 256;
		dev_priv->r600_max_threads = 248;
		dev_priv->r600_max_stack_entries = 512;
		dev_priv->r600_max_hw_contexts = 8;
		dev_priv->r600_max_gs_threads = 16 * 2;
		dev_priv->r600_sx_max_export_size = 128;
		dev_priv->r600_sx_max_export_pos_size = 16;
		dev_priv->r600_sx_max_export_smx_size = 112;
		dev_priv->r600_sq_num_cf_insts = 2;

		dev_priv->r700_sx_num_of_sets = 7;
		dev_priv->r700_sc_prim_fifo_size = 0xF9;
		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV730:
		dev_priv->r600_max_pipes = 2;
		dev_priv->r600_max_tile_pipes = 4;
		dev_priv->r600_max_simds = 8;
		dev_priv->r600_max_backends = 2;
		dev_priv->r600_max_gprs = 128;
		dev_priv->r600_max_threads = 248;
		dev_priv->r600_max_stack_entries = 256;
		dev_priv->r600_max_hw_contexts = 8;
		dev_priv->r600_max_gs_threads = 16 * 2;
		dev_priv->r600_sx_max_export_size = 256;
		dev_priv->r600_sx_max_export_pos_size = 32;
		dev_priv->r600_sx_max_export_smx_size = 224;
		dev_priv->r600_sq_num_cf_insts = 2;

		dev_priv->r700_sx_num_of_sets = 7;
		dev_priv->r700_sc_prim_fifo_size = 0xf9;
		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
		/* rebalance the SX export budget: shift 16 entries from
		 * position exports to SMX exports */
		if (dev_priv->r600_sx_max_export_pos_size > 16) {
			dev_priv->r600_sx_max_export_pos_size -= 16;
			dev_priv->r600_sx_max_export_smx_size += 16;
		}
		break;
	case CHIP_RV710:
		dev_priv->r600_max_pipes = 2;
		dev_priv->r600_max_tile_pipes = 2;
		dev_priv->r600_max_simds = 2;
		dev_priv->r600_max_backends = 1;
		dev_priv->r600_max_gprs = 256;
		dev_priv->r600_max_threads = 192;
		dev_priv->r600_max_stack_entries = 256;
		dev_priv->r600_max_hw_contexts = 4;
		dev_priv->r600_max_gs_threads = 8 * 2;
		dev_priv->r600_sx_max_export_size = 128;
		dev_priv->r600_sx_max_export_pos_size = 16;
		dev_priv->r600_sx_max_export_smx_size = 112;
		dev_priv->r600_sq_num_cf_insts = 1;

		dev_priv->r700_sx_num_of_sets = 7;
		dev_priv->r700_sc_prim_fifo_size = 0x40;
		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV740:
		dev_priv->r600_max_pipes = 4;
		dev_priv->r600_max_tile_pipes = 4;
		dev_priv->r600_max_simds = 8;
		dev_priv->r600_max_backends = 4;
		dev_priv->r600_max_gprs = 256;
		dev_priv->r600_max_threads = 248;
		dev_priv->r600_max_stack_entries = 512;
		dev_priv->r600_max_hw_contexts = 8;
		dev_priv->r600_max_gs_threads = 16 * 2;
		dev_priv->r600_sx_max_export_size = 256;
		dev_priv->r600_sx_max_export_pos_size = 32;
		dev_priv->r600_sx_max_export_smx_size = 224;
		dev_priv->r600_sq_num_cf_insts = 2;

		dev_priv->r700_sx_num_of_sets = 7;
		dev_priv->r700_sc_prim_fifo_size = 0x100;
		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;

		/* same SX export rebalance as RV730 above */
		if (dev_priv->r600_sx_max_export_pos_size > 16) {
			dev_priv->r600_sx_max_export_pos_size -= 16;
			dev_priv->r600_sx_max_export_smx_size += 16;
		}
		break;
	default:
		break;
	}

	/* Initialize HDP: zero 32 groups of five registers starting at
	 * 0x2c14, stride 0x18 per group */
	j = 0;
	for (i = 0; i < 32; i++) {
		RADEON_WRITE((0x2c14 + j), 0x00000000);
		RADEON_WRITE((0x2c18 + j), 0x00000000);
		RADEON_WRITE((0x2c1c + j), 0x00000000);
		RADEON_WRITE((0x2c20 + j), 0x00000000);
		RADEON_WRITE((0x2c24 + j), 0x00000000);
		j += 0x18;
	}

	RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));

	/* setup tiling, simd, pipe config */
	mc_arb_ramcfg = RADEON_READ(R700_MC_ARB_RAMCFG);

	/* encode the tile-pipe count (1/2/4/8) as a 2-bit field */
	switch (dev_priv->r600_max_tile_pipes) {
	case 1:
		gb_tiling_config |= R600_PIPE_TILING(0);
		break;
	case 2:
		gb_tiling_config |= R600_PIPE_TILING(1);
		break;
	case 4:
		gb_tiling_config |= R600_PIPE_TILING(2);
		break;
	case 8:
		gb_tiling_config |= R600_PIPE_TILING(3);
		break;
	default:
		break;
	}

	/* RV770 forces bank tiling; other chips derive it from the
	 * memory controller's bank count */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
		gb_tiling_config |= R600_BANK_TILING(1);
	else
		gb_tiling_config |= R600_BANK_TILING((mc_arb_ramcfg >> R700_NOOFBANK_SHIFT) & R700_NOOFBANK_MASK);

	gb_tiling_config |= R600_GROUP_SIZE(0);

	/* row tiling / sample split from the DRAM row count, capped at 3 */
	if (((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK) > 3) {
		gb_tiling_config |= R600_ROW_TILING(3);
		gb_tiling_config |= R600_SAMPLE_SPLIT(3);
	} else {
		gb_tiling_config |=
			R600_ROW_TILING(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
		gb_tiling_config |=
			R600_SAMPLE_SPLIT(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
	}

	gb_tiling_config |= R600_BANK_SWAPS(1);

	/* disable the backends beyond r600_max_backends (bits 16..23
	 * of the register are preserved) */
	cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);

	/* likewise mark QD pipes and SIMDs beyond the chip limits inactive */
	cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);

	/* RV740 uses a fixed backend map; others compute one from the
	 * enabled-backend mask */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
		backend_map = 0x28;
	else
		backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
								dev_priv->r600_max_tile_pipes,
								(R7XX_MAX_BACKENDS -
								 r600_count_pipe_bits((cc_rb_backend_disable &
										       R7XX_MAX_BACKENDS_MASK) >> 16)),
								(cc_rb_backend_disable >> 16));
	gb_tiling_config |= R600_BACKEND_MAP(backend_map);

	RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
	RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	/* cache software copies of the tiling parameters: group size,
	 * pipe count and bank count are decoded back out of
	 * gb_tiling_config for later use */
	if (gb_tiling_config & 0xc0) {
		dev_priv->r600_group_size = 512;
	} else {
		dev_priv->r600_group_size = 256;
	}
	dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
	if (gb_tiling_config & 0x30) {
		dev_priv->r600_nbanks = 8;
	} else {
		dev_priv->r600_nbanks = 4;
	}

	RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	/* enable all texture cache channels */
	RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
	RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
	RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
	RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);

	/* size the VGT dealloc/reuse depths from the active pipe count */
	num_qd_pipes =
		R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
	RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
	RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);

	/* set HW defaults for 3D engine */
	RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
						R600_ROQ_IB2_START(0x2b)));

	RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));

	/* read-modify-write: keep existing TA aux bits, disable cube aniso */
	ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
	RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);

	sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
	sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
	RADEON_WRITE(R700_SX_DEBUG_1, sx_debug_1);

	/* SMX cache depth derived from the per-chip set count */
	smx_dc_ctl0 = RADEON_READ(R600_SMX_DC_CTL0);
	smx_dc_ctl0 &= ~R700_CACHE_DEPTH(0x1ff);
	smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
	RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);

	/* RV740 keeps the hardware default for SMX_EVENT_CTL */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
		RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
						  R700_GS_FLUSH_CTL(4) |
						  R700_ACK_FLUSH_CTL(3) |
						  R700_SYNC_FLUSH_CTL));

	/* per-family DB clock-off delay */
	db_debug3 = RADEON_READ(R700_DB_DEBUG3);
	db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_RV770:
	case CHIP_RV740:
		db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
		break;
	case CHIP_RV710:
	case CHIP_RV730:
	default:
		db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
		break;
	}
	RADEON_WRITE(R700_DB_DEBUG3, db_debug3);

	/* DB_DEBUG4 workaround applies to everything except RV770 */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
		db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
		db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
		RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
	}

	/* SX export buffer sizes are programmed in units of 4, minus one */
	RADEON_WRITE(R600_SX_EXPORT_BUFFER_SIZES, (R600_COLOR_BUFFER_SIZE((dev_priv->r600_sx_max_export_size / 4) - 1) |
						   R600_POSITION_BUFFER_SIZE((dev_priv->r600_sx_max_export_pos_size / 4) - 1) |
						   R600_SMX_BUFFER_SIZE((dev_priv->r600_sx_max_export_smx_size / 4) - 1)));

	RADEON_WRITE(R700_PA_SC_FIFO_SIZE_R7XX, (R700_SC_PRIM_FIFO_SIZE(dev_priv->r700_sc_prim_fifo_size) |
						 R700_SC_HIZ_TILE_FIFO_SIZE(dev_priv->r700_sc_hiz_tile_fifo_size) |
						 R700_SC_EARLYZ_TILE_FIFO_SIZE(dev_priv->r700_sc_earlyz_tile_fifo_fize)));

	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);

	RADEON_WRITE(R600_VGT_NUM_INSTANCES, 1);

	RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));

	RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(4));

	RADEON_WRITE(R600_CP_PERFMON_CNTL, 0);

	/* fetch FIFO high-water mark differs per family */
	sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(16 * dev_priv->r600_sq_num_cf_insts) |
			    R600_DONE_FIFO_HIWATER(0xe0) |
			    R600_ALU_UPDATE_FIFO_HIWATER(0x8));
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
		sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
		break;
	case CHIP_RV740:
	default:
		sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
		break;
	}
	RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RADEON_READ(R600_SQ_CONFIG);
	sq_config &= ~(R600_PS_PRIO(3) |
		       R600_VS_PRIO(3) |
		       R600_GS_PRIO(3) |
		       R600_ES_PRIO(3));
	sq_config |= (R600_DX9_CONSTS |
		      R600_VC_ENABLE |
		      R600_EXPORT_SRC_C |
		      R600_PS_PRIO(0) |
		      R600_VS_PRIO(1) |
		      R600_GS_PRIO(2) |
		      R600_ES_PRIO(3));
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
		/* no vertex cache */
		sq_config &= ~R600_VC_ENABLE;

	RADEON_WRITE(R600_SQ_CONFIG, sq_config);

	/* split the GPR pool: 24/64 each to PS and VS (half of that as
	 * clause temporaries), 7/64 each to GS and ES */
	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1, (R600_NUM_PS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
						   R600_NUM_VS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
						   R600_NUM_CLAUSE_TEMP_GPRS(((dev_priv->r600_max_gprs * 24)/64)/2)));

	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2, (R600_NUM_GS_GPRS((dev_priv->r600_max_gprs * 7)/64) |
						   R600_NUM_ES_GPRS((dev_priv->r600_max_gprs * 7)/64)));

	/* split the thread pool 4:2:1 between PS, VS, ES; GS gets 1/8
	 * capped at the chip's GS thread limit */
	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS((dev_priv->r600_max_threads * 4)/8) |
				   R600_NUM_VS_THREADS((dev_priv->r600_max_threads * 2)/8) |
				   R600_NUM_ES_THREADS((dev_priv->r600_max_threads * 1)/8));
	if (((dev_priv->r600_max_threads * 1) / 8) > dev_priv->r600_max_gs_threads)
		sq_thread_resource_mgmt |= R600_NUM_GS_THREADS(dev_priv->r600_max_gs_threads);
	else
		sq_thread_resource_mgmt |= R600_NUM_GS_THREADS((dev_priv->r600_max_gs_threads * 1)/8);
	RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);

	/* quarter of the stack entries to each of PS/VS/GS/ES */
	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, (R600_NUM_PS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
						     R600_NUM_VS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));

	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, (R600_NUM_GS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
						     R600_NUM_ES_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));

	/* same dynamic GPR ring size (38/64 of the pool) for all SIMD pairs */
	sq_dyn_gpr_size_simd_ab_0 = (R700_SIMDA_RING0((dev_priv->r600_max_gprs * 38)/64) |
				     R700_SIMDA_RING1((dev_priv->r600_max_gprs * 38)/64) |
				     R700_SIMDB_RING0((dev_priv->r600_max_gprs * 38)/64) |
				     R700_SIMDB_RING1((dev_priv->r600_max_gprs * 38)/64));

	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);

	RADEON_WRITE(R700_PA_SC_FORCE_EOV_MAX_CNTS, (R700_FORCE_EOV_MAX_CLK_CNT(4095) |
						     R700_FORCE_EOV_MAX_REZ_CNT(255)));

	/* RV710 has no vertex cache (see above), so invalidate TC only */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_TC_ONLY) |
							   R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
	else
		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_VC_AND_TC) |
							   R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));

	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV740:
		gs_prim_buffer_depth = 384;
		break;
	case CHIP_RV710:
		gs_prim_buffer_depth = 128;
		break;
	default:
		break;
	}

	num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
	/* Max value for this is 256 */
	if (vgt_gs_per_es > 256)
		vgt_gs_per_es = 256;

	RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
	RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
	RADEON_WRITE(R600_VGT_GS_PER_VS, 2);

	/* more default values. 2D/3D driver should adjust as needed */
	RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
	RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
	RADEON_WRITE(R600_SX_MISC, 0);
	RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
	RADEON_WRITE(R700_PA_SC_EDGERULE, 0xaaaaaaaa);
	RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
	RADEON_WRITE(R600_PA_SC_CLIPRECT_RULE, 0xffff);
	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
	RADEON_WRITE(R600_SPI_INPUT_Z, 0);
	RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
	RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);

	/* clear render buffer base addresses */
	RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR7_BASE, 0);

	RADEON_WRITE(R700_TCP_CNTL, 0);

	/* write-back of the current value flushes/latches HDP host path
	 * config -- NOTE(review): presumably a required read-modify-write
	 * sequence; confirm against the hardware init docs */
	hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
	RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);

	RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
					  R600_NUM_CLIP_SEQ(3)));

}
1780
/*
 * Bring up the CP (command processor) ring buffer.
 *
 * Runs the one-time GFX init for the detected ASIC generation, soft-resets
 * the CP, programs the ring size and read/write pointers (with the
 * byte-swap bit on big-endian hosts), points the scratch registers behind
 * the ring read pointer, enables bus mastering, and clears the SAREA
 * copies of the last-frame/dispatch/clear scratch values.
 */
static void r600_cp_init_ring_buffer(struct drm_device *dev,
				     drm_radeon_private_t *dev_priv,
				     struct drm_file *file_priv)
{
	struct drm_radeon_master_private *master_priv;
	u32 ring_start;
	u64 rptr_addr;

	/* RV770 and newer take the r700 init path */
	if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
		r700_gfx_init(dev, dev_priv);
	else
		r600_gfx_init(dev, dev_priv);

	/* soft-reset the CP; the read flushes the write before the delay */
	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
	RADEON_READ(R600_GRBM_SOFT_RESET);
	mdelay(15);
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);


	/* Set ring buffer size */
#ifdef __BIG_ENDIAN
	RADEON_WRITE(R600_CP_RB_CNTL,
		     R600_BUF_SWAP_32BIT |
		     R600_RB_NO_UPDATE |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#else
	/* NOTE(review): this path uses RADEON_RB_NO_UPDATE where the
	 * big-endian path uses R600_RB_NO_UPDATE -- presumably the two
	 * macros name the same bit; confirm before unifying. */
	RADEON_WRITE(R600_CP_RB_CNTL,
		     RADEON_RB_NO_UPDATE |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#endif

	RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);

	/* enable read-pointer writes so we can zero RPTR below */
#ifdef __BIG_ENDIAN
	RADEON_WRITE(R600_CP_RB_CNTL,
		     R600_BUF_SWAP_32BIT |
		     R600_RB_NO_UPDATE |
		     R600_RB_RPTR_WR_ENA |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#else
	RADEON_WRITE(R600_CP_RB_CNTL,
		     R600_RB_NO_UPDATE |
		     R600_RB_RPTR_WR_ENA |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#endif

	/* Initialize the ring buffer's read and write pointers */
	RADEON_WRITE(R600_CP_RB_RPTR_WR, 0);
	RADEON_WRITE(R600_CP_RB_WPTR, 0);
	SET_RING_HEAD(dev_priv, 0);
	dev_priv->ring.tail = 0;

	/* GPU address of the ring read-pointer writeback location:
	 * map offset relative to AGP aperture or SG virtual base,
	 * rebased onto the GART VM window */
#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP) {
		rptr_addr = dev_priv->ring_rptr->offset
			- dev->agp->base +
			dev_priv->gart_vm_start;
	} else
#endif
	{
		rptr_addr = dev_priv->ring_rptr->offset
			- ((unsigned long) dev->sg->virtual)
			+ dev_priv->gart_vm_start;
	}
	/* low bits must be clear; high half goes to the _HI register */
	RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));

	/* final RB_CNTL: drop NO_UPDATE/RPTR_WR_ENA set above */
#ifdef __BIG_ENDIAN
	/* NOTE(review): RADEON_BUF_SWAP_32BIT here vs R600_BUF_SWAP_32BIT
	 * in the earlier writes -- presumably the same bit; confirm. */
	RADEON_WRITE(R600_CP_RB_CNTL,
		     RADEON_BUF_SWAP_32BIT |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#else
	RADEON_WRITE(R600_CP_RB_CNTL,
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#endif

#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* XXX */
		radeon_write_agp_base(dev_priv, dev->agp->base);

		/* XXX */
		radeon_write_agp_location(dev_priv,
			     (((dev_priv->gart_vm_start - 1 +
				dev_priv->gart_size) & 0xffff0000) |
			      (dev_priv->gart_vm_start >> 16)));

		ring_start = (dev_priv->cp_ring->offset
			      - dev->agp->base
			      + dev_priv->gart_vm_start);
	} else
#endif
		ring_start = (dev_priv->cp_ring->offset
			      - (unsigned long)dev->sg->virtual
			      + dev_priv->gart_vm_start);

	/* ring base is programmed in 256-byte units */
	RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8);

	RADEON_WRITE(R600_CP_ME_CNTL, 0xff);

	RADEON_WRITE(R600_CP_DEBUG, (1 << 27) | (1 << 28));

	/* Initialize the scratch register pointer. This will cause
	 * the scratch register values to be written out to memory
	 * whenever they are updated.
	 *
	 * We simply put this behind the ring read pointer, this works
	 * with PCI GART as well as (whatever kind of) AGP GART
	 */
	{
		u64 scratch_addr;

		/* rebuild the 64-bit rptr address from the two registers,
		 * offset past the rptr slot, then store it in 256-byte
		 * units truncated to 32 bits */
		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
		scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
		scratch_addr += R600_SCRATCH_REG_OFFSET;
		scratch_addr >>= 8;
		scratch_addr &= 0xffffffff;

		RADEON_WRITE(R600_SCRATCH_ADDR, (uint32_t)scratch_addr);
	}

	/* enable writeback for scratch registers 0-2 */
	RADEON_WRITE(R600_SCRATCH_UMSK, 0x7);

	/* Turn on bus mastering */
	radeon_enable_bm(dev_priv);

	/* zero both the writeback copies and the registers for the
	 * last-frame / last-dispatch / last-clear scratch values */
	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(0), 0);
	RADEON_WRITE(R600_LAST_FRAME_REG, 0);

	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
	RADEON_WRITE(R600_LAST_DISPATCH_REG, 0);

	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(2), 0);
	RADEON_WRITE(R600_LAST_CLEAR_REG, 0);

	/* reset sarea copies of these */
	master_priv = file_priv->master->driver_priv;
	if (master_priv->sarea_priv) {
		master_priv->sarea_priv->last_frame = 0;
		master_priv->sarea_priv->last_dispatch = 0;
		master_priv->sarea_priv->last_clear = 0;
	}

	r600_do_wait_for_idle(dev_priv);

}
1936
1937int r600_do_cleanup_cp(struct drm_device *dev)
1938{
1939 drm_radeon_private_t *dev_priv = dev->dev_private;
1940 DRM_DEBUG("\n");
1941
1942 /* Make sure interrupts are disabled here because the uninstall ioctl
1943 * may not have been called from userspace and after dev_private
1944 * is freed, it's too late.
1945 */
1946 if (dev->irq_enabled)
1947 drm_irq_uninstall(dev);
1948
1949#if IS_ENABLED(CONFIG_AGP)
1950 if (dev_priv->flags & RADEON_IS_AGP) {
1951 if (dev_priv->cp_ring != NULL) {
1952 drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
1953 dev_priv->cp_ring = NULL;
1954 }
1955 if (dev_priv->ring_rptr != NULL) {
1956 drm_legacy_ioremapfree(dev_priv->ring_rptr, dev);
1957 dev_priv->ring_rptr = NULL;
1958 }
1959 if (dev->agp_buffer_map != NULL) {
1960 drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
1961 dev->agp_buffer_map = NULL;
1962 }
1963 } else
1964#endif
1965 {
1966
1967 if (dev_priv->gart_info.bus_addr)
1968 r600_page_table_cleanup(dev, &dev_priv->gart_info);
1969
1970 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
1971 drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev);
1972 dev_priv->gart_info.addr = NULL;
1973 }
1974 }
1975 /* only clear to the start of flags */
1976 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
1977
1978 return 0;
1979}
1980
1981int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1982 struct drm_file *file_priv)
1983{
1984 drm_radeon_private_t *dev_priv = dev->dev_private;
1985 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
1986
1987 DRM_DEBUG("\n");
1988
1989 mutex_init(&dev_priv->cs_mutex);
1990 r600_cs_legacy_init();
1991 /* if we require new memory map but we don't have it fail */
1992 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
1993 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
1994 r600_do_cleanup_cp(dev);
1995 return -EINVAL;
1996 }
1997
1998 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
1999 DRM_DEBUG("Forcing AGP card to PCI mode\n");
2000 dev_priv->flags &= ~RADEON_IS_AGP;
2001 /* The writeback test succeeds, but when writeback is enabled,
2002 * the ring buffer read ptr update fails after first 128 bytes.
2003 */
2004 radeon_no_wb = 1;
2005 } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
2006 && !init->is_pci) {
2007 DRM_DEBUG("Restoring AGP flag\n");
2008 dev_priv->flags |= RADEON_IS_AGP;
2009 }
2010
2011 dev_priv->usec_timeout = init->usec_timeout;
2012 if (dev_priv->usec_timeout < 1 ||
2013 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
2014 DRM_DEBUG("TIMEOUT problem!\n");
2015 r600_do_cleanup_cp(dev);
2016 return -EINVAL;
2017 }
2018
2019 /* Enable vblank on CRTC1 for older X servers
2020 */
2021 dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
2022 dev_priv->do_boxes = 0;
2023 dev_priv->cp_mode = init->cp_mode;
2024
2025 /* We don't support anything other than bus-mastering ring mode,
2026 * but the ring can be in either AGP or PCI space for the ring
2027 * read pointer.
2028 */
2029 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
2030 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
2031 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
2032 r600_do_cleanup_cp(dev);
2033 return -EINVAL;
2034 }
2035
2036 switch (init->fb_bpp) {
2037 case 16:
2038 dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
2039 break;
2040 case 32:
2041 default:
2042 dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
2043 break;
2044 }
2045 dev_priv->front_offset = init->front_offset;
2046 dev_priv->front_pitch = init->front_pitch;
2047 dev_priv->back_offset = init->back_offset;
2048 dev_priv->back_pitch = init->back_pitch;
2049
2050 dev_priv->ring_offset = init->ring_offset;
2051 dev_priv->ring_rptr_offset = init->ring_rptr_offset;
2052 dev_priv->buffers_offset = init->buffers_offset;
2053 dev_priv->gart_textures_offset = init->gart_textures_offset;
2054
2055 master_priv->sarea = drm_legacy_getsarea(dev);
2056 if (!master_priv->sarea) {
2057 DRM_ERROR("could not find sarea!\n");
2058 r600_do_cleanup_cp(dev);
2059 return -EINVAL;
2060 }
2061
2062 dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset);
2063 if (!dev_priv->cp_ring) {
2064 DRM_ERROR("could not find cp ring region!\n");
2065 r600_do_cleanup_cp(dev);
2066 return -EINVAL;
2067 }
2068 dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset);
2069 if (!dev_priv->ring_rptr) {
2070 DRM_ERROR("could not find ring read pointer!\n");
2071 r600_do_cleanup_cp(dev);
2072 return -EINVAL;
2073 }
2074 dev->agp_buffer_token = init->buffers_offset;
2075 dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
2076 if (!dev->agp_buffer_map) {
2077 DRM_ERROR("could not find dma buffer region!\n");
2078 r600_do_cleanup_cp(dev);
2079 return -EINVAL;
2080 }
2081
2082 if (init->gart_textures_offset) {
2083 dev_priv->gart_textures =
2084 drm_legacy_findmap(dev, init->gart_textures_offset);
2085 if (!dev_priv->gart_textures) {
2086 DRM_ERROR("could not find GART texture region!\n");
2087 r600_do_cleanup_cp(dev);
2088 return -EINVAL;
2089 }
2090 }
2091
2092#if IS_ENABLED(CONFIG_AGP)
2093 /* XXX */
2094 if (dev_priv->flags & RADEON_IS_AGP) {
2095 drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
2096 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
2097 drm_legacy_ioremap_wc(dev->agp_buffer_map, dev);
2098 if (!dev_priv->cp_ring->handle ||
2099 !dev_priv->ring_rptr->handle ||
2100 !dev->agp_buffer_map->handle) {
2101 DRM_ERROR("could not find ioremap agp regions!\n");
2102 r600_do_cleanup_cp(dev);
2103 return -EINVAL;
2104 }
2105 } else
2106#endif
2107 {
2108 dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
2109 dev_priv->ring_rptr->handle =
2110 (void *)(unsigned long)dev_priv->ring_rptr->offset;
2111 dev->agp_buffer_map->handle =
2112 (void *)(unsigned long)dev->agp_buffer_map->offset;
2113
2114 DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
2115 dev_priv->cp_ring->handle);
2116 DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
2117 dev_priv->ring_rptr->handle);
2118 DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
2119 dev->agp_buffer_map->handle);
2120 }
2121
2122 dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
2123 dev_priv->fb_size =
2124 (((radeon_read_fb_location(dev_priv) & 0xffff0000u) << 8) + 0x1000000)
2125 - dev_priv->fb_location;
2126
2127 dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
2128 ((dev_priv->front_offset
2129 + dev_priv->fb_location) >> 10));
2130
2131 dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
2132 ((dev_priv->back_offset
2133 + dev_priv->fb_location) >> 10));
2134
2135 dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
2136 ((dev_priv->depth_offset
2137 + dev_priv->fb_location) >> 10));
2138
2139 dev_priv->gart_size = init->gart_size;
2140
2141 /* New let's set the memory map ... */
2142 if (dev_priv->new_memmap) {
2143 u32 base = 0;
2144
2145 DRM_INFO("Setting GART location based on new memory map\n");
2146
2147 /* If using AGP, try to locate the AGP aperture at the same
2148 * location in the card and on the bus, though we have to
2149 * align it down.
2150 */
2151#if IS_ENABLED(CONFIG_AGP)
2152 /* XXX */
2153 if (dev_priv->flags & RADEON_IS_AGP) {
2154 base = dev->agp->base;
2155 /* Check if valid */
2156 if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
2157 base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
2158 DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
2159 dev->agp->base);
2160 base = 0;
2161 }
2162 }
2163#endif
2164 /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
2165 if (base == 0) {
2166 base = dev_priv->fb_location + dev_priv->fb_size;
2167 if (base < dev_priv->fb_location ||
2168 ((base + dev_priv->gart_size) & 0xfffffffful) < base)
2169 base = dev_priv->fb_location
2170 - dev_priv->gart_size;
2171 }
2172 dev_priv->gart_vm_start = base & 0xffc00000u;
2173 if (dev_priv->gart_vm_start != base)
2174 DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
2175 base, dev_priv->gart_vm_start);
2176 }
2177
2178#if IS_ENABLED(CONFIG_AGP)
2179 /* XXX */
2180 if (dev_priv->flags & RADEON_IS_AGP)
2181 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
2182 - dev->agp->base
2183 + dev_priv->gart_vm_start);
2184 else
2185#endif
2186 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
2187 - (unsigned long)dev->sg->virtual
2188 + dev_priv->gart_vm_start);
2189
2190 DRM_DEBUG("fb 0x%08x size %d\n",
2191 (unsigned int) dev_priv->fb_location,
2192 (unsigned int) dev_priv->fb_size);
2193 DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
2194 DRM_DEBUG("dev_priv->gart_vm_start 0x%08x\n",
2195 (unsigned int) dev_priv->gart_vm_start);
2196 DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
2197 dev_priv->gart_buffers_offset);
2198
2199 dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
2200 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
2201 + init->ring_size / sizeof(u32));
2202 dev_priv->ring.size = init->ring_size;
2203 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
2204
2205 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
2206 dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
2207
2208 dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
2209 dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
2210
2211 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
2212
2213 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
2214
2215#if IS_ENABLED(CONFIG_AGP)
2216 if (dev_priv->flags & RADEON_IS_AGP) {
2217 /* XXX turn off pcie gart */
2218 } else
2219#endif
2220 {
2221 dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
2222 /* if we have an offset set from userspace */
2223 if (!dev_priv->pcigart_offset_set) {
2224 DRM_ERROR("Need gart offset from userspace\n");
2225 r600_do_cleanup_cp(dev);
2226 return -EINVAL;
2227 }
2228
2229 DRM_DEBUG("Using gart offset 0x%08lx\n", dev_priv->pcigart_offset);
2230
2231 dev_priv->gart_info.bus_addr =
2232 dev_priv->pcigart_offset + dev_priv->fb_location;
2233 dev_priv->gart_info.mapping.offset =
2234 dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
2235 dev_priv->gart_info.mapping.size =
2236 dev_priv->gart_info.table_size;
2237
2238 drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev);
2239 if (!dev_priv->gart_info.mapping.handle) {
2240 DRM_ERROR("ioremap failed.\n");
2241 r600_do_cleanup_cp(dev);
2242 return -EINVAL;
2243 }
2244
2245 dev_priv->gart_info.addr =
2246 dev_priv->gart_info.mapping.handle;
2247
2248 DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
2249 dev_priv->gart_info.addr,
2250 dev_priv->pcigart_offset);
2251
2252 if (!r600_page_table_init(dev)) {
2253 DRM_ERROR("Failed to init GART table\n");
2254 r600_do_cleanup_cp(dev);
2255 return -EINVAL;
2256 }
2257
2258 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
2259 r700_vm_init(dev);
2260 else
2261 r600_vm_init(dev);
2262 }
2263
2264 if (!dev_priv->me_fw || !dev_priv->pfp_fw) {
2265 int err = r600_cp_init_microcode(dev_priv);
2266 if (err) {
2267 DRM_ERROR("Failed to load firmware!\n");
2268 r600_do_cleanup_cp(dev);
2269 return err;
2270 }
2271 }
2272 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
2273 r700_cp_load_microcode(dev_priv);
2274 else
2275 r600_cp_load_microcode(dev_priv);
2276
2277 r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
2278
2279 dev_priv->last_buf = 0;
2280
2281 r600_do_engine_reset(dev);
2282 r600_test_writeback(dev_priv);
2283
2284 return 0;
2285}
2286
2287int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
2288{
2289 drm_radeon_private_t *dev_priv = dev->dev_private;
2290
2291 DRM_DEBUG("\n");
2292 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)) {
2293 r700_vm_init(dev);
2294 r700_cp_load_microcode(dev_priv);
2295 } else {
2296 r600_vm_init(dev);
2297 r600_cp_load_microcode(dev_priv);
2298 }
2299 r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
2300 r600_do_engine_reset(dev);
2301
2302 return 0;
2303}
2304
2305/* Wait for the CP to go idle.
2306 */
2307int r600_do_cp_idle(drm_radeon_private_t *dev_priv)
2308{
2309 RING_LOCALS;
2310 DRM_DEBUG("\n");
2311
2312 BEGIN_RING(5);
2313 OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
2314 OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
2315 /* wait for 3D idle clean */
2316 OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
2317 OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
2318 OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
2319
2320 ADVANCE_RING();
2321 COMMIT_RING();
2322
2323 return r600_do_wait_for_idle(dev_priv);
2324}
2325
2326/* Start the Command Processor.
2327 */
2328void r600_do_cp_start(drm_radeon_private_t *dev_priv)
2329{
2330 u32 cp_me;
2331 RING_LOCALS;
2332 DRM_DEBUG("\n");
2333
2334 BEGIN_RING(7);
2335 OUT_RING(CP_PACKET3(R600_IT_ME_INITIALIZE, 5));
2336 OUT_RING(0x00000001);
2337 if (((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770))
2338 OUT_RING(0x00000003);
2339 else
2340 OUT_RING(0x00000000);
2341 OUT_RING((dev_priv->r600_max_hw_contexts - 1));
2342 OUT_RING(R600_ME_INITIALIZE_DEVICE_ID(1));
2343 OUT_RING(0x00000000);
2344 OUT_RING(0x00000000);
2345 ADVANCE_RING();
2346 COMMIT_RING();
2347
2348 /* set the mux and reset the halt bit */
2349 cp_me = 0xff;
2350 RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
2351
2352 dev_priv->cp_running = 1;
2353
2354}
2355
2356void r600_do_cp_reset(drm_radeon_private_t *dev_priv)
2357{
2358 u32 cur_read_ptr;
2359 DRM_DEBUG("\n");
2360
2361 cur_read_ptr = RADEON_READ(R600_CP_RB_RPTR);
2362 RADEON_WRITE(R600_CP_RB_WPTR, cur_read_ptr);
2363 SET_RING_HEAD(dev_priv, cur_read_ptr);
2364 dev_priv->ring.tail = cur_read_ptr;
2365}
2366
2367void r600_do_cp_stop(drm_radeon_private_t *dev_priv)
2368{
2369 uint32_t cp_me;
2370
2371 DRM_DEBUG("\n");
2372
2373 cp_me = 0xff | R600_CP_ME_HALT;
2374
2375 RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
2376
2377 dev_priv->cp_running = 0;
2378}
2379
2380int r600_cp_dispatch_indirect(struct drm_device *dev,
2381 struct drm_buf *buf, int start, int end)
2382{
2383 drm_radeon_private_t *dev_priv = dev->dev_private;
2384 RING_LOCALS;
2385
2386 if (start != end) {
2387 unsigned long offset = (dev_priv->gart_buffers_offset
2388 + buf->offset + start);
2389 int dwords = (end - start + 3) / sizeof(u32);
2390
2391 DRM_DEBUG("dwords:%d\n", dwords);
2392 DRM_DEBUG("offset 0x%lx\n", offset);
2393
2394
2395 /* Indirect buffer data must be a multiple of 16 dwords.
2396 * pad the data with a Type-2 CP packet.
2397 */
2398 while (dwords & 0xf) {
2399 u32 *data = (u32 *)
2400 ((char *)dev->agp_buffer_map->handle
2401 + buf->offset + start);
2402 data[dwords++] = RADEON_CP_PACKET2;
2403 }
2404
2405 /* Fire off the indirect buffer */
2406 BEGIN_RING(4);
2407 OUT_RING(CP_PACKET3(R600_IT_INDIRECT_BUFFER, 2));
2408 OUT_RING((offset & 0xfffffffc));
2409 OUT_RING((upper_32_bits(offset) & 0xff));
2410 OUT_RING(dwords);
2411 ADVANCE_RING();
2412 }
2413
2414 return 0;
2415}
2416
2417void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
2418{
2419 drm_radeon_private_t *dev_priv = dev->dev_private;
2420 struct drm_master *master = file_priv->master;
2421 struct drm_radeon_master_private *master_priv = master->driver_priv;
2422 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
2423 int nbox = sarea_priv->nbox;
2424 struct drm_clip_rect *pbox = sarea_priv->boxes;
2425 int i, cpp, src_pitch, dst_pitch;
2426 uint64_t src, dst;
2427 RING_LOCALS;
2428 DRM_DEBUG("\n");
2429
2430 if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888)
2431 cpp = 4;
2432 else
2433 cpp = 2;
2434
2435 if (sarea_priv->pfCurrentPage == 0) {
2436 src_pitch = dev_priv->back_pitch;
2437 dst_pitch = dev_priv->front_pitch;
2438 src = dev_priv->back_offset + dev_priv->fb_location;
2439 dst = dev_priv->front_offset + dev_priv->fb_location;
2440 } else {
2441 src_pitch = dev_priv->front_pitch;
2442 dst_pitch = dev_priv->back_pitch;
2443 src = dev_priv->front_offset + dev_priv->fb_location;
2444 dst = dev_priv->back_offset + dev_priv->fb_location;
2445 }
2446
2447 if (r600_prepare_blit_copy(dev, file_priv)) {
2448 DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
2449 return;
2450 }
2451 for (i = 0; i < nbox; i++) {
2452 int x = pbox[i].x1;
2453 int y = pbox[i].y1;
2454 int w = pbox[i].x2 - x;
2455 int h = pbox[i].y2 - y;
2456
2457 DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
2458
2459 r600_blit_swap(dev,
2460 src, dst,
2461 x, y, x, y, w, h,
2462 src_pitch, dst_pitch, cpp);
2463 }
2464 r600_done_blit_copy(dev);
2465
2466 /* Increment the frame counter. The client-side 3D driver must
2467 * throttle the framerate by waiting for this value before
2468 * performing the swapbuffer ioctl.
2469 */
2470 sarea_priv->last_frame++;
2471
2472 BEGIN_RING(3);
2473 R600_FRAME_AGE(sarea_priv->last_frame);
2474 ADVANCE_RING();
2475}
2476
2477int r600_cp_dispatch_texture(struct drm_device *dev,
2478 struct drm_file *file_priv,
2479 drm_radeon_texture_t *tex,
2480 drm_radeon_tex_image_t *image)
2481{
2482 drm_radeon_private_t *dev_priv = dev->dev_private;
2483 struct drm_buf *buf;
2484 u32 *buffer;
2485 const u8 __user *data;
2486 unsigned int size, pass_size;
2487 u64 src_offset, dst_offset;
2488
2489 if (!radeon_check_offset(dev_priv, tex->offset)) {
2490 DRM_ERROR("Invalid destination offset\n");
2491 return -EINVAL;
2492 }
2493
2494 /* this might fail for zero-sized uploads - are those illegal? */
2495 if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) {
2496 DRM_ERROR("Invalid final destination offset\n");
2497 return -EINVAL;
2498 }
2499
2500 size = tex->height * tex->pitch;
2501
2502 if (size == 0)
2503 return 0;
2504
2505 dst_offset = tex->offset;
2506
2507 if (r600_prepare_blit_copy(dev, file_priv)) {
2508 DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
2509 return -EAGAIN;
2510 }
2511 do {
2512 data = (const u8 __user *)image->data;
2513 pass_size = size;
2514
2515 buf = radeon_freelist_get(dev);
2516 if (!buf) {
2517 DRM_DEBUG("EAGAIN\n");
2518 if (copy_to_user(tex->image, image, sizeof(*image)))
2519 return -EFAULT;
2520 return -EAGAIN;
2521 }
2522
2523 if (pass_size > buf->total)
2524 pass_size = buf->total;
2525
2526 /* Dispatch the indirect buffer.
2527 */
2528 buffer =
2529 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
2530
2531 if (copy_from_user(buffer, data, pass_size)) {
2532 DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
2533 return -EFAULT;
2534 }
2535
2536 buf->file_priv = file_priv;
2537 buf->used = pass_size;
2538 src_offset = dev_priv->gart_buffers_offset + buf->offset;
2539
2540 r600_blit_copy(dev, src_offset, dst_offset, pass_size);
2541
2542 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2543
2544 /* Update the input parameters for next time */
2545 image->data = (const u8 __user *)image->data + pass_size;
2546 dst_offset += pass_size;
2547 size -= pass_size;
2548 } while (size > 0);
2549 r600_done_blit_copy(dev);
2550
2551 return 0;
2552}
2553
2554/*
2555 * Legacy cs ioctl
2556 */
2557static u32 radeon_cs_id_get(struct drm_radeon_private *radeon)
2558{
2559 /* FIXME: check if wrap affect last reported wrap & sequence */
2560 radeon->cs_id_scnt = (radeon->cs_id_scnt + 1) & 0x00FFFFFF;
2561 if (!radeon->cs_id_scnt) {
2562 /* increment wrap counter */
2563 radeon->cs_id_wcnt += 0x01000000;
2564 /* valid sequence counter start at 1 */
2565 radeon->cs_id_scnt = 1;
2566 }
2567 return (radeon->cs_id_scnt | radeon->cs_id_wcnt);
2568}
2569
2570static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id)
2571{
2572 RING_LOCALS;
2573
2574 *id = radeon_cs_id_get(dev_priv);
2575
2576 /* SCRATCH 2 */
2577 BEGIN_RING(3);
2578 R600_CLEAR_AGE(*id);
2579 ADVANCE_RING();
2580 COMMIT_RING();
2581}
2582
2583static int r600_ib_get(struct drm_device *dev,
2584 struct drm_file *fpriv,
2585 struct drm_buf **buffer)
2586{
2587 struct drm_buf *buf;
2588
2589 *buffer = NULL;
2590 buf = radeon_freelist_get(dev);
2591 if (!buf) {
2592 return -EBUSY;
2593 }
2594 buf->file_priv = fpriv;
2595 *buffer = buf;
2596 return 0;
2597}
2598
2599static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
2600 struct drm_file *fpriv, int l, int r)
2601{
2602 drm_radeon_private_t *dev_priv = dev->dev_private;
2603
2604 if (buf) {
2605 if (!r)
2606 r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
2607 radeon_cp_discard_buffer(dev, fpriv->master, buf);
2608 COMMIT_RING();
2609 }
2610}
2611
2612int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
2613{
2614 struct drm_radeon_private *dev_priv = dev->dev_private;
2615 struct drm_radeon_cs *cs = data;
2616 struct drm_buf *buf;
2617 unsigned family;
2618 int l, r = 0;
2619 u32 *ib, cs_id = 0;
2620
2621 if (dev_priv == NULL) {
2622 DRM_ERROR("called with no initialization\n");
2623 return -EINVAL;
2624 }
2625 family = dev_priv->flags & RADEON_FAMILY_MASK;
2626 if (family < CHIP_R600) {
2627 DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n");
2628 return -EINVAL;
2629 }
2630 mutex_lock(&dev_priv->cs_mutex);
2631 /* get ib */
2632 r = r600_ib_get(dev, fpriv, &buf);
2633 if (r) {
2634 DRM_ERROR("ib_get failed\n");
2635 goto out;
2636 }
2637 ib = dev->agp_buffer_map->handle + buf->offset;
2638 /* now parse command stream */
2639 r = r600_cs_legacy(dev, data, fpriv, family, ib, &l);
2640 if (r) {
2641 goto out;
2642 }
2643
2644out:
2645 r600_ib_free(dev, buf, fpriv, l, r);
2646 /* emit cs id sequence */
2647 r600_cs_id_emit(dev_priv, &cs_id);
2648 cs->cs_id = cs_id;
2649 mutex_unlock(&dev_priv->cs_mutex);
2650 return r;
2651}
2652
2653void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
2654{
2655 struct drm_radeon_private *dev_priv = dev->dev_private;
2656
2657 *npipes = dev_priv->r600_npipes;
2658 *nbanks = dev_priv->r600_nbanks;
2659 *group_size = dev_priv->r600_group_size;
2660}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index acc1f99c84d9..2f36fa1576e0 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2328,101 +2328,6 @@ int r600_cs_parse(struct radeon_cs_parser *p)
2328 return 0; 2328 return 0;
2329} 2329}
2330 2330
2331#ifdef CONFIG_DRM_RADEON_UMS
2332
2333/**
2334 * cs_parser_fini() - clean parser states
2335 * @parser: parser structure holding parsing context.
2336 * @error: error number
2337 *
2338 * If error is set than unvalidate buffer, otherwise just free memory
2339 * used by parsing context.
2340 **/
2341static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2342{
2343 unsigned i;
2344
2345 kfree(parser->relocs);
2346 for (i = 0; i < parser->nchunks; i++)
2347 drm_free_large(parser->chunks[i].kdata);
2348 kfree(parser->chunks);
2349 kfree(parser->chunks_array);
2350}
2351
2352static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2353{
2354 if (p->chunk_relocs == NULL) {
2355 return 0;
2356 }
2357 p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
2358 if (p->relocs == NULL) {
2359 return -ENOMEM;
2360 }
2361 return 0;
2362}
2363
2364int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2365 unsigned family, u32 *ib, int *l)
2366{
2367 struct radeon_cs_parser parser;
2368 struct radeon_cs_chunk *ib_chunk;
2369 struct r600_cs_track *track;
2370 int r;
2371
2372 /* initialize tracker */
2373 track = kzalloc(sizeof(*track), GFP_KERNEL);
2374 if (track == NULL)
2375 return -ENOMEM;
2376 r600_cs_track_init(track);
2377 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
2378 /* initialize parser */
2379 memset(&parser, 0, sizeof(struct radeon_cs_parser));
2380 parser.filp = filp;
2381 parser.dev = &dev->pdev->dev;
2382 parser.rdev = NULL;
2383 parser.family = family;
2384 parser.track = track;
2385 parser.ib.ptr = ib;
2386 r = radeon_cs_parser_init(&parser, data);
2387 if (r) {
2388 DRM_ERROR("Failed to initialize parser !\n");
2389 r600_cs_parser_fini(&parser, r);
2390 return r;
2391 }
2392 r = r600_cs_parser_relocs_legacy(&parser);
2393 if (r) {
2394 DRM_ERROR("Failed to parse relocation !\n");
2395 r600_cs_parser_fini(&parser, r);
2396 return r;
2397 }
2398 /* Copy the packet into the IB, the parser will read from the
2399 * input memory (cached) and write to the IB (which can be
2400 * uncached). */
2401 ib_chunk = parser.chunk_ib;
2402 parser.ib.length_dw = ib_chunk->length_dw;
2403 *l = parser.ib.length_dw;
2404 if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
2405 r = -EFAULT;
2406 r600_cs_parser_fini(&parser, r);
2407 return r;
2408 }
2409 r = r600_cs_parse(&parser);
2410 if (r) {
2411 DRM_ERROR("Invalid command stream !\n");
2412 r600_cs_parser_fini(&parser, r);
2413 return r;
2414 }
2415 r600_cs_parser_fini(&parser, r);
2416 return r;
2417}
2418
2419void r600_cs_legacy_init(void)
2420{
2421 r600_nomm = 1;
2422}
2423
2424#endif
2425
2426/* 2331/*
2427 * DMA 2332 * DMA
2428 */ 2333 */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 87db64983ea8..5ae6db98aa4d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -268,6 +268,7 @@ struct radeon_clock {
268 uint32_t current_dispclk; 268 uint32_t current_dispclk;
269 uint32_t dp_extclk; 269 uint32_t dp_extclk;
270 uint32_t max_pixel_clock; 270 uint32_t max_pixel_clock;
271 uint32_t gpupll_outputfreq;
271}; 272};
272 273
273/* 274/*
@@ -1889,7 +1890,7 @@ struct radeon_asic {
1889 void (*pad_ib)(struct radeon_ib *ib); 1890 void (*pad_ib)(struct radeon_ib *ib);
1890 } vm; 1891 } vm;
1891 /* ring specific callbacks */ 1892 /* ring specific callbacks */
1892 struct radeon_asic_ring *ring[RADEON_NUM_RINGS]; 1893 const struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
1893 /* irqs */ 1894 /* irqs */
1894 struct { 1895 struct {
1895 int (*set)(struct radeon_device *rdev); 1896 int (*set)(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 1d4d4520a0ac..7d5a36dd5094 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -179,7 +179,7 @@ void radeon_agp_disable(struct radeon_device *rdev)
179 * ASIC 179 * ASIC
180 */ 180 */
181 181
182static struct radeon_asic_ring r100_gfx_ring = { 182static const struct radeon_asic_ring r100_gfx_ring = {
183 .ib_execute = &r100_ring_ib_execute, 183 .ib_execute = &r100_ring_ib_execute,
184 .emit_fence = &r100_fence_ring_emit, 184 .emit_fence = &r100_fence_ring_emit,
185 .emit_semaphore = &r100_semaphore_ring_emit, 185 .emit_semaphore = &r100_semaphore_ring_emit,
@@ -329,7 +329,7 @@ static struct radeon_asic r200_asic = {
329 }, 329 },
330}; 330};
331 331
332static struct radeon_asic_ring r300_gfx_ring = { 332static const struct radeon_asic_ring r300_gfx_ring = {
333 .ib_execute = &r100_ring_ib_execute, 333 .ib_execute = &r100_ring_ib_execute,
334 .emit_fence = &r300_fence_ring_emit, 334 .emit_fence = &r300_fence_ring_emit,
335 .emit_semaphore = &r100_semaphore_ring_emit, 335 .emit_semaphore = &r100_semaphore_ring_emit,
@@ -343,7 +343,7 @@ static struct radeon_asic_ring r300_gfx_ring = {
343 .set_wptr = &r100_gfx_set_wptr, 343 .set_wptr = &r100_gfx_set_wptr,
344}; 344};
345 345
346static struct radeon_asic_ring rv515_gfx_ring = { 346static const struct radeon_asic_ring rv515_gfx_ring = {
347 .ib_execute = &r100_ring_ib_execute, 347 .ib_execute = &r100_ring_ib_execute,
348 .emit_fence = &r300_fence_ring_emit, 348 .emit_fence = &r300_fence_ring_emit,
349 .emit_semaphore = &r100_semaphore_ring_emit, 349 .emit_semaphore = &r100_semaphore_ring_emit,
@@ -901,7 +901,7 @@ static struct radeon_asic r520_asic = {
901 }, 901 },
902}; 902};
903 903
904static struct radeon_asic_ring r600_gfx_ring = { 904static const struct radeon_asic_ring r600_gfx_ring = {
905 .ib_execute = &r600_ring_ib_execute, 905 .ib_execute = &r600_ring_ib_execute,
906 .emit_fence = &r600_fence_ring_emit, 906 .emit_fence = &r600_fence_ring_emit,
907 .emit_semaphore = &r600_semaphore_ring_emit, 907 .emit_semaphore = &r600_semaphore_ring_emit,
@@ -914,7 +914,7 @@ static struct radeon_asic_ring r600_gfx_ring = {
914 .set_wptr = &r600_gfx_set_wptr, 914 .set_wptr = &r600_gfx_set_wptr,
915}; 915};
916 916
917static struct radeon_asic_ring r600_dma_ring = { 917static const struct radeon_asic_ring r600_dma_ring = {
918 .ib_execute = &r600_dma_ring_ib_execute, 918 .ib_execute = &r600_dma_ring_ib_execute,
919 .emit_fence = &r600_dma_fence_ring_emit, 919 .emit_fence = &r600_dma_fence_ring_emit,
920 .emit_semaphore = &r600_dma_semaphore_ring_emit, 920 .emit_semaphore = &r600_dma_semaphore_ring_emit,
@@ -999,7 +999,7 @@ static struct radeon_asic r600_asic = {
999 }, 999 },
1000}; 1000};
1001 1001
1002static struct radeon_asic_ring rv6xx_uvd_ring = { 1002static const struct radeon_asic_ring rv6xx_uvd_ring = {
1003 .ib_execute = &uvd_v1_0_ib_execute, 1003 .ib_execute = &uvd_v1_0_ib_execute,
1004 .emit_fence = &uvd_v1_0_fence_emit, 1004 .emit_fence = &uvd_v1_0_fence_emit,
1005 .emit_semaphore = &uvd_v1_0_semaphore_emit, 1005 .emit_semaphore = &uvd_v1_0_semaphore_emit,
@@ -1198,7 +1198,7 @@ static struct radeon_asic rs780_asic = {
1198 }, 1198 },
1199}; 1199};
1200 1200
1201static struct radeon_asic_ring rv770_uvd_ring = { 1201static const struct radeon_asic_ring rv770_uvd_ring = {
1202 .ib_execute = &uvd_v1_0_ib_execute, 1202 .ib_execute = &uvd_v1_0_ib_execute,
1203 .emit_fence = &uvd_v2_2_fence_emit, 1203 .emit_fence = &uvd_v2_2_fence_emit,
1204 .emit_semaphore = &uvd_v2_2_semaphore_emit, 1204 .emit_semaphore = &uvd_v2_2_semaphore_emit,
@@ -1305,7 +1305,7 @@ static struct radeon_asic rv770_asic = {
1305 }, 1305 },
1306}; 1306};
1307 1307
1308static struct radeon_asic_ring evergreen_gfx_ring = { 1308static const struct radeon_asic_ring evergreen_gfx_ring = {
1309 .ib_execute = &evergreen_ring_ib_execute, 1309 .ib_execute = &evergreen_ring_ib_execute,
1310 .emit_fence = &r600_fence_ring_emit, 1310 .emit_fence = &r600_fence_ring_emit,
1311 .emit_semaphore = &r600_semaphore_ring_emit, 1311 .emit_semaphore = &r600_semaphore_ring_emit,
@@ -1318,7 +1318,7 @@ static struct radeon_asic_ring evergreen_gfx_ring = {
1318 .set_wptr = &r600_gfx_set_wptr, 1318 .set_wptr = &r600_gfx_set_wptr,
1319}; 1319};
1320 1320
1321static struct radeon_asic_ring evergreen_dma_ring = { 1321static const struct radeon_asic_ring evergreen_dma_ring = {
1322 .ib_execute = &evergreen_dma_ring_ib_execute, 1322 .ib_execute = &evergreen_dma_ring_ib_execute,
1323 .emit_fence = &evergreen_dma_fence_ring_emit, 1323 .emit_fence = &evergreen_dma_fence_ring_emit,
1324 .emit_semaphore = &r600_dma_semaphore_ring_emit, 1324 .emit_semaphore = &r600_dma_semaphore_ring_emit,
@@ -1612,7 +1612,7 @@ static struct radeon_asic btc_asic = {
1612 }, 1612 },
1613}; 1613};
1614 1614
1615static struct radeon_asic_ring cayman_gfx_ring = { 1615static const struct radeon_asic_ring cayman_gfx_ring = {
1616 .ib_execute = &cayman_ring_ib_execute, 1616 .ib_execute = &cayman_ring_ib_execute,
1617 .ib_parse = &evergreen_ib_parse, 1617 .ib_parse = &evergreen_ib_parse,
1618 .emit_fence = &cayman_fence_ring_emit, 1618 .emit_fence = &cayman_fence_ring_emit,
@@ -1627,7 +1627,7 @@ static struct radeon_asic_ring cayman_gfx_ring = {
1627 .set_wptr = &cayman_gfx_set_wptr, 1627 .set_wptr = &cayman_gfx_set_wptr,
1628}; 1628};
1629 1629
1630static struct radeon_asic_ring cayman_dma_ring = { 1630static const struct radeon_asic_ring cayman_dma_ring = {
1631 .ib_execute = &cayman_dma_ring_ib_execute, 1631 .ib_execute = &cayman_dma_ring_ib_execute,
1632 .ib_parse = &evergreen_dma_ib_parse, 1632 .ib_parse = &evergreen_dma_ib_parse,
1633 .emit_fence = &evergreen_dma_fence_ring_emit, 1633 .emit_fence = &evergreen_dma_fence_ring_emit,
@@ -1642,7 +1642,7 @@ static struct radeon_asic_ring cayman_dma_ring = {
1642 .set_wptr = &cayman_dma_set_wptr 1642 .set_wptr = &cayman_dma_set_wptr
1643}; 1643};
1644 1644
1645static struct radeon_asic_ring cayman_uvd_ring = { 1645static const struct radeon_asic_ring cayman_uvd_ring = {
1646 .ib_execute = &uvd_v1_0_ib_execute, 1646 .ib_execute = &uvd_v1_0_ib_execute,
1647 .emit_fence = &uvd_v2_2_fence_emit, 1647 .emit_fence = &uvd_v2_2_fence_emit,
1648 .emit_semaphore = &uvd_v3_1_semaphore_emit, 1648 .emit_semaphore = &uvd_v3_1_semaphore_emit,
@@ -1760,7 +1760,7 @@ static struct radeon_asic cayman_asic = {
1760 }, 1760 },
1761}; 1761};
1762 1762
1763static struct radeon_asic_ring trinity_vce_ring = { 1763static const struct radeon_asic_ring trinity_vce_ring = {
1764 .ib_execute = &radeon_vce_ib_execute, 1764 .ib_execute = &radeon_vce_ib_execute,
1765 .emit_fence = &radeon_vce_fence_emit, 1765 .emit_fence = &radeon_vce_fence_emit,
1766 .emit_semaphore = &radeon_vce_semaphore_emit, 1766 .emit_semaphore = &radeon_vce_semaphore_emit,
@@ -1881,7 +1881,7 @@ static struct radeon_asic trinity_asic = {
1881 }, 1881 },
1882}; 1882};
1883 1883
1884static struct radeon_asic_ring si_gfx_ring = { 1884static const struct radeon_asic_ring si_gfx_ring = {
1885 .ib_execute = &si_ring_ib_execute, 1885 .ib_execute = &si_ring_ib_execute,
1886 .ib_parse = &si_ib_parse, 1886 .ib_parse = &si_ib_parse,
1887 .emit_fence = &si_fence_ring_emit, 1887 .emit_fence = &si_fence_ring_emit,
@@ -1896,7 +1896,7 @@ static struct radeon_asic_ring si_gfx_ring = {
1896 .set_wptr = &cayman_gfx_set_wptr, 1896 .set_wptr = &cayman_gfx_set_wptr,
1897}; 1897};
1898 1898
1899static struct radeon_asic_ring si_dma_ring = { 1899static const struct radeon_asic_ring si_dma_ring = {
1900 .ib_execute = &cayman_dma_ring_ib_execute, 1900 .ib_execute = &cayman_dma_ring_ib_execute,
1901 .ib_parse = &evergreen_dma_ib_parse, 1901 .ib_parse = &evergreen_dma_ib_parse,
1902 .emit_fence = &evergreen_dma_fence_ring_emit, 1902 .emit_fence = &evergreen_dma_fence_ring_emit,
@@ -2023,7 +2023,7 @@ static struct radeon_asic si_asic = {
2023 }, 2023 },
2024}; 2024};
2025 2025
2026static struct radeon_asic_ring ci_gfx_ring = { 2026static const struct radeon_asic_ring ci_gfx_ring = {
2027 .ib_execute = &cik_ring_ib_execute, 2027 .ib_execute = &cik_ring_ib_execute,
2028 .ib_parse = &cik_ib_parse, 2028 .ib_parse = &cik_ib_parse,
2029 .emit_fence = &cik_fence_gfx_ring_emit, 2029 .emit_fence = &cik_fence_gfx_ring_emit,
@@ -2038,7 +2038,7 @@ static struct radeon_asic_ring ci_gfx_ring = {
2038 .set_wptr = &cik_gfx_set_wptr, 2038 .set_wptr = &cik_gfx_set_wptr,
2039}; 2039};
2040 2040
2041static struct radeon_asic_ring ci_cp_ring = { 2041static const struct radeon_asic_ring ci_cp_ring = {
2042 .ib_execute = &cik_ring_ib_execute, 2042 .ib_execute = &cik_ring_ib_execute,
2043 .ib_parse = &cik_ib_parse, 2043 .ib_parse = &cik_ib_parse,
2044 .emit_fence = &cik_fence_compute_ring_emit, 2044 .emit_fence = &cik_fence_compute_ring_emit,
@@ -2053,7 +2053,7 @@ static struct radeon_asic_ring ci_cp_ring = {
2053 .set_wptr = &cik_compute_set_wptr, 2053 .set_wptr = &cik_compute_set_wptr,
2054}; 2054};
2055 2055
2056static struct radeon_asic_ring ci_dma_ring = { 2056static const struct radeon_asic_ring ci_dma_ring = {
2057 .ib_execute = &cik_sdma_ring_ib_execute, 2057 .ib_execute = &cik_sdma_ring_ib_execute,
2058 .ib_parse = &cik_ib_parse, 2058 .ib_parse = &cik_ib_parse,
2059 .emit_fence = &cik_sdma_fence_ring_emit, 2059 .emit_fence = &cik_sdma_fence_ring_emit,
@@ -2068,7 +2068,7 @@ static struct radeon_asic_ring ci_dma_ring = {
2068 .set_wptr = &cik_sdma_set_wptr, 2068 .set_wptr = &cik_sdma_set_wptr,
2069}; 2069};
2070 2070
2071static struct radeon_asic_ring ci_vce_ring = { 2071static const struct radeon_asic_ring ci_vce_ring = {
2072 .ib_execute = &radeon_vce_ib_execute, 2072 .ib_execute = &radeon_vce_ib_execute,
2073 .emit_fence = &radeon_vce_fence_emit, 2073 .emit_fence = &radeon_vce_fence_emit,
2074 .emit_semaphore = &radeon_vce_semaphore_emit, 2074 .emit_semaphore = &radeon_vce_semaphore_emit,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 8f285244c839..08fc1b5effa8 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
437 } 437 }
438 438
439 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ 439 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
440 if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && 440 if (((dev->pdev->device == 0x9802) ||
441 (dev->pdev->device == 0x9805) ||
442 (dev->pdev->device == 0x9806)) &&
441 (dev->pdev->subsystem_vendor == 0x1734) && 443 (dev->pdev->subsystem_vendor == 0x1734) &&
442 (dev->pdev->subsystem_device == 0x11bd)) { 444 (dev->pdev->subsystem_device == 0x11bd)) {
443 if (*connector_type == DRM_MODE_CONNECTOR_VGA) { 445 if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
448 } 450 }
449 } 451 }
450 452
451 /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
452 if ((dev->pdev->device == 0x9805) &&
453 (dev->pdev->subsystem_vendor == 0x1734) &&
454 (dev->pdev->subsystem_device == 0x11bd)) {
455 if (*connector_type == DRM_MODE_CONNECTOR_VGA)
456 return false;
457 }
458
459 return true; 453 return true;
460} 454}
461 455
@@ -1263,6 +1257,13 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1263 rdev->mode_info.firmware_flags = 1257 rdev->mode_info.firmware_flags =
1264 le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); 1258 le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
1265 1259
1260 if (ASIC_IS_DCE8(rdev)) {
1261 rdev->clock.gpupll_outputfreq =
1262 le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
1263 if (rdev->clock.gpupll_outputfreq == 0)
1264 rdev->clock.gpupll_outputfreq = 360000; /* 3.6 GHz */
1265 }
1266
1266 return true; 1267 return true;
1267 } 1268 }
1268 1269
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
deleted file mode 100644
index 500287eff55d..000000000000
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ /dev/null
@@ -1,2243 +0,0 @@
1/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
2/*
3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * Copyright 2007 Advanced Micro Devices, Inc.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Kevin E. Martin <martin@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 *
31 * ------------------------ This file is DEPRECATED! -------------------------
32 */
33
34#include <linux/module.h>
35
36#include <drm/drmP.h>
37#include <drm/radeon_drm.h>
38#include "radeon_drv.h"
39#include "r300_reg.h"
40
41#define RADEON_FIFO_DEBUG 0
42
43/* Firmware Names */
44#define FIRMWARE_R100 "radeon/R100_cp.bin"
45#define FIRMWARE_R200 "radeon/R200_cp.bin"
46#define FIRMWARE_R300 "radeon/R300_cp.bin"
47#define FIRMWARE_R420 "radeon/R420_cp.bin"
48#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
49#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
50#define FIRMWARE_R520 "radeon/R520_cp.bin"
51
52MODULE_FIRMWARE(FIRMWARE_R100);
53MODULE_FIRMWARE(FIRMWARE_R200);
54MODULE_FIRMWARE(FIRMWARE_R300);
55MODULE_FIRMWARE(FIRMWARE_R420);
56MODULE_FIRMWARE(FIRMWARE_RS690);
57MODULE_FIRMWARE(FIRMWARE_RS600);
58MODULE_FIRMWARE(FIRMWARE_R520);
59
60static int radeon_do_cleanup_cp(struct drm_device * dev);
61static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
62
63u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off)
64{
65 u32 val;
66
67 if (dev_priv->flags & RADEON_IS_AGP) {
68 val = DRM_READ32(dev_priv->ring_rptr, off);
69 } else {
70 val = *(((volatile u32 *)
71 dev_priv->ring_rptr->handle) +
72 (off / sizeof(u32)));
73 val = le32_to_cpu(val);
74 }
75 return val;
76}
77
78u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv)
79{
80 if (dev_priv->writeback_works)
81 return radeon_read_ring_rptr(dev_priv, 0);
82 else {
83 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
84 return RADEON_READ(R600_CP_RB_RPTR);
85 else
86 return RADEON_READ(RADEON_CP_RB_RPTR);
87 }
88}
89
90void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val)
91{
92 if (dev_priv->flags & RADEON_IS_AGP)
93 DRM_WRITE32(dev_priv->ring_rptr, off, val);
94 else
95 *(((volatile u32 *) dev_priv->ring_rptr->handle) +
96 (off / sizeof(u32))) = cpu_to_le32(val);
97}
98
99void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val)
100{
101 radeon_write_ring_rptr(dev_priv, 0, val);
102}
103
104u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
105{
106 if (dev_priv->writeback_works) {
107 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
108 return radeon_read_ring_rptr(dev_priv,
109 R600_SCRATCHOFF(index));
110 else
111 return radeon_read_ring_rptr(dev_priv,
112 RADEON_SCRATCHOFF(index));
113 } else {
114 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
115 return RADEON_READ(R600_SCRATCH_REG0 + 4*index);
116 else
117 return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index);
118 }
119}
120
121static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
122{
123 u32 ret;
124 RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
125 ret = RADEON_READ(R520_MC_IND_DATA);
126 RADEON_WRITE(R520_MC_IND_INDEX, 0);
127 return ret;
128}
129
130static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
131{
132 u32 ret;
133 RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
134 ret = RADEON_READ(RS480_NB_MC_DATA);
135 RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
136 return ret;
137}
138
139static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
140{
141 u32 ret;
142 RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
143 ret = RADEON_READ(RS690_MC_DATA);
144 RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
145 return ret;
146}
147
148static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
149{
150 u32 ret;
151 RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) |
152 RS600_MC_IND_CITF_ARB0));
153 ret = RADEON_READ(RS600_MC_DATA);
154 return ret;
155}
156
157static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
158{
159 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
160 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
161 return RS690_READ_MCIND(dev_priv, addr);
162 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
163 return RS600_READ_MCIND(dev_priv, addr);
164 else
165 return RS480_READ_MCIND(dev_priv, addr);
166}
167
168u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
169{
170
171 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
172 return RADEON_READ(R700_MC_VM_FB_LOCATION);
173 else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
174 return RADEON_READ(R600_MC_VM_FB_LOCATION);
175 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
176 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
177 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
178 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
179 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
180 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
181 return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION);
182 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
183 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
184 else
185 return RADEON_READ(RADEON_MC_FB_LOCATION);
186}
187
188static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
189{
190 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
191 RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc);
192 else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
193 RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc);
194 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
195 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
196 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
197 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
198 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
199 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
200 RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc);
201 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
202 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
203 else
204 RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
205}
206
207void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
208{
209 /*R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */
210 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
211 RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
212 RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
213 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
214 RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
215 RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
216 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
217 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
218 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
219 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
220 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
221 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
222 RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc);
223 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
224 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
225 else
226 RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
227}
228
229void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
230{
231 u32 agp_base_hi = upper_32_bits(agp_base);
232 u32 agp_base_lo = agp_base & 0xffffffff;
233 u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff;
234
235 /* R6xx/R7xx must be aligned to a 4MB boundary */
236 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
237 RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base);
238 else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
239 RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base);
240 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
241 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
242 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
243 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
244 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
245 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
246 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
247 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
248 RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo);
249 RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi);
250 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
251 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
252 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
253 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
254 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
255 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
256 RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
257 } else {
258 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
259 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
260 RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
261 }
262}
263
264void radeon_enable_bm(struct drm_radeon_private *dev_priv)
265{
266 u32 tmp;
267 /* Turn on bus mastering */
268 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
269 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
270 /* rs600/rs690/rs740 */
271 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
272 RADEON_WRITE(RADEON_BUS_CNTL, tmp);
273 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) ||
274 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
275 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
276 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
277 /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
278 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
279 RADEON_WRITE(RADEON_BUS_CNTL, tmp);
280 } /* PCIE cards appears to not need this */
281}
282
283static int RADEON_READ_PLL(struct drm_device * dev, int addr)
284{
285 drm_radeon_private_t *dev_priv = dev->dev_private;
286
287 RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
288 return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
289}
290
291static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
292{
293 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
294 return RADEON_READ(RADEON_PCIE_DATA);
295}
296
297#if RADEON_FIFO_DEBUG
298static void radeon_status(drm_radeon_private_t * dev_priv)
299{
300 printk("%s:\n", __func__);
301 printk("RBBM_STATUS = 0x%08x\n",
302 (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
303 printk("CP_RB_RTPR = 0x%08x\n",
304 (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
305 printk("CP_RB_WTPR = 0x%08x\n",
306 (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
307 printk("AIC_CNTL = 0x%08x\n",
308 (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
309 printk("AIC_STAT = 0x%08x\n",
310 (unsigned int)RADEON_READ(RADEON_AIC_STAT));
311 printk("AIC_PT_BASE = 0x%08x\n",
312 (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
313 printk("TLB_ADDR = 0x%08x\n",
314 (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
315 printk("TLB_DATA = 0x%08x\n",
316 (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
317}
318#endif
319
320/* ================================================================
321 * Engine, FIFO control
322 */
323
324static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
325{
326 u32 tmp;
327 int i;
328
329 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
330
331 if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
332 tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
333 tmp |= RADEON_RB3D_DC_FLUSH_ALL;
334 RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
335
336 for (i = 0; i < dev_priv->usec_timeout; i++) {
337 if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
338 & RADEON_RB3D_DC_BUSY)) {
339 return 0;
340 }
341 DRM_UDELAY(1);
342 }
343 } else {
344 /* don't flush or purge cache here or lockup */
345 return 0;
346 }
347
348#if RADEON_FIFO_DEBUG
349 DRM_ERROR("failed!\n");
350 radeon_status(dev_priv);
351#endif
352 return -EBUSY;
353}
354
355static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
356{
357 int i;
358
359 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
360
361 for (i = 0; i < dev_priv->usec_timeout; i++) {
362 int slots = (RADEON_READ(RADEON_RBBM_STATUS)
363 & RADEON_RBBM_FIFOCNT_MASK);
364 if (slots >= entries)
365 return 0;
366 DRM_UDELAY(1);
367 }
368 DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n",
369 RADEON_READ(RADEON_RBBM_STATUS),
370 RADEON_READ(R300_VAP_CNTL_STATUS));
371
372#if RADEON_FIFO_DEBUG
373 DRM_ERROR("failed!\n");
374 radeon_status(dev_priv);
375#endif
376 return -EBUSY;
377}
378
379static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
380{
381 int i, ret;
382
383 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
384
385 ret = radeon_do_wait_for_fifo(dev_priv, 64);
386 if (ret)
387 return ret;
388
389 for (i = 0; i < dev_priv->usec_timeout; i++) {
390 if (!(RADEON_READ(RADEON_RBBM_STATUS)
391 & RADEON_RBBM_ACTIVE)) {
392 radeon_do_pixcache_flush(dev_priv);
393 return 0;
394 }
395 DRM_UDELAY(1);
396 }
397 DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n",
398 RADEON_READ(RADEON_RBBM_STATUS),
399 RADEON_READ(R300_VAP_CNTL_STATUS));
400
401#if RADEON_FIFO_DEBUG
402 DRM_ERROR("failed!\n");
403 radeon_status(dev_priv);
404#endif
405 return -EBUSY;
406}
407
408static void radeon_init_pipes(struct drm_device *dev)
409{
410 drm_radeon_private_t *dev_priv = dev->dev_private;
411 uint32_t gb_tile_config, gb_pipe_sel = 0;
412
413 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
414 uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
415 if ((z_pipe_sel & 3) == 3)
416 dev_priv->num_z_pipes = 2;
417 else
418 dev_priv->num_z_pipes = 1;
419 } else
420 dev_priv->num_z_pipes = 1;
421
422 /* RS4xx/RS6xx/R4xx/R5xx */
423 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
424 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
425 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
426 /* SE cards have 1 pipe */
427 if ((dev->pdev->device == 0x5e4c) ||
428 (dev->pdev->device == 0x5e4f))
429 dev_priv->num_gb_pipes = 1;
430 } else {
431 /* R3xx */
432 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
433 dev->pdev->device != 0x4144) ||
434 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
435 dev->pdev->device != 0x4148)) {
436 dev_priv->num_gb_pipes = 2;
437 } else {
438 /* RV3xx/R300 AD/R350 AH */
439 dev_priv->num_gb_pipes = 1;
440 }
441 }
442 DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
443
444 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
445
446 switch (dev_priv->num_gb_pipes) {
447 case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
448 case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
449 case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
450 default:
451 case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
452 }
453
454 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
455 RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
456 RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
457 }
458 RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
459 radeon_do_wait_for_idle(dev_priv);
460 RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
461 RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
462 R300_DC_AUTOFLUSH_ENABLE |
463 R300_DC_DC_DISABLE_IGNORE_PE));
464
465
466}
467
468/* ================================================================
469 * CP control, initialization
470 */
471
472/* Load the microcode for the CP */
473static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv)
474{
475 struct platform_device *pdev;
476 const char *fw_name = NULL;
477 int err;
478
479 DRM_DEBUG("\n");
480
481 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
482 err = IS_ERR(pdev);
483 if (err) {
484 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
485 return -EINVAL;
486 }
487
488 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
489 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
490 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
491 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
492 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
493 DRM_INFO("Loading R100 Microcode\n");
494 fw_name = FIRMWARE_R100;
495 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
496 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
497 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
498 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
499 DRM_INFO("Loading R200 Microcode\n");
500 fw_name = FIRMWARE_R200;
501 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
502 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
503 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
504 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
505 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
506 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
507 DRM_INFO("Loading R300 Microcode\n");
508 fw_name = FIRMWARE_R300;
509 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
510 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
511 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
512 DRM_INFO("Loading R400 Microcode\n");
513 fw_name = FIRMWARE_R420;
514 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
515 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
516 DRM_INFO("Loading RS690/RS740 Microcode\n");
517 fw_name = FIRMWARE_RS690;
518 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
519 DRM_INFO("Loading RS600 Microcode\n");
520 fw_name = FIRMWARE_RS600;
521 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
522 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
523 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
524 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
525 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
526 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
527 DRM_INFO("Loading R500 Microcode\n");
528 fw_name = FIRMWARE_R520;
529 }
530
531 err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
532 platform_device_unregister(pdev);
533 if (err) {
534 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
535 fw_name);
536 } else if (dev_priv->me_fw->size % 8) {
537 printk(KERN_ERR
538 "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
539 dev_priv->me_fw->size, fw_name);
540 err = -EINVAL;
541 release_firmware(dev_priv->me_fw);
542 dev_priv->me_fw = NULL;
543 }
544 return err;
545}
546
547static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv)
548{
549 const __be32 *fw_data;
550 int i, size;
551
552 radeon_do_wait_for_idle(dev_priv);
553
554 if (dev_priv->me_fw) {
555 size = dev_priv->me_fw->size / 4;
556 fw_data = (const __be32 *)&dev_priv->me_fw->data[0];
557 RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
558 for (i = 0; i < size; i += 2) {
559 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
560 be32_to_cpup(&fw_data[i]));
561 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
562 be32_to_cpup(&fw_data[i + 1]));
563 }
564 }
565}
566
567/* Flush any pending commands to the CP. This should only be used just
568 * prior to a wait for idle, as it informs the engine that the command
569 * stream is ending.
570 */
571static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
572{
573 DRM_DEBUG("\n");
574#if 0
575 u32 tmp;
576
577 tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
578 RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
579#endif
580}
581
582/* Wait for the CP to go idle.
583 */
584int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
585{
586 RING_LOCALS;
587 DRM_DEBUG("\n");
588
589 BEGIN_RING(6);
590
591 RADEON_PURGE_CACHE();
592 RADEON_PURGE_ZCACHE();
593 RADEON_WAIT_UNTIL_IDLE();
594
595 ADVANCE_RING();
596 COMMIT_RING();
597
598 return radeon_do_wait_for_idle(dev_priv);
599}
600
601/* Start the Command Processor.
602 */
603static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
604{
605 RING_LOCALS;
606 DRM_DEBUG("\n");
607
608 radeon_do_wait_for_idle(dev_priv);
609
610 RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
611
612 dev_priv->cp_running = 1;
613
614 /* on r420, any DMA from CP to system memory while 2D is active
615 * can cause a hang. workaround is to queue a CP RESYNC token
616 */
617 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
618 BEGIN_RING(3);
619 OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1));
620 OUT_RING(5); /* scratch reg 5 */
621 OUT_RING(0xdeadbeef);
622 ADVANCE_RING();
623 COMMIT_RING();
624 }
625
626 BEGIN_RING(8);
627 /* isync can only be written through cp on r5xx write it here */
628 OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
629 OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
630 RADEON_ISYNC_ANY3D_IDLE2D |
631 RADEON_ISYNC_WAIT_IDLEGUI |
632 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
633 RADEON_PURGE_CACHE();
634 RADEON_PURGE_ZCACHE();
635 RADEON_WAIT_UNTIL_IDLE();
636 ADVANCE_RING();
637 COMMIT_RING();
638
639 dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
640}
641
642/* Reset the Command Processor. This will not flush any pending
643 * commands, so you must wait for the CP command stream to complete
644 * before calling this routine.
645 */
646static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
647{
648 u32 cur_read_ptr;
649 DRM_DEBUG("\n");
650
651 cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
652 RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
653 SET_RING_HEAD(dev_priv, cur_read_ptr);
654 dev_priv->ring.tail = cur_read_ptr;
655}
656
657/* Stop the Command Processor. This will not flush any pending
658 * commands, so you must flush the command stream and wait for the CP
659 * to go idle before calling this routine.
660 */
661static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
662{
663 RING_LOCALS;
664 DRM_DEBUG("\n");
665
666 /* finish the pending CP_RESYNC token */
667 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
668 BEGIN_RING(2);
669 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
670 OUT_RING(R300_RB3D_DC_FINISH);
671 ADVANCE_RING();
672 COMMIT_RING();
673 radeon_do_wait_for_idle(dev_priv);
674 }
675
676 RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
677
678 dev_priv->cp_running = 0;
679}
680
/* Reset the engine.  This will stop the CP if it is running.
 */
static int radeon_do_engine_reset(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
	DRM_DEBUG("\n");

	radeon_do_pixcache_flush(dev_priv);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
		/* may need something similar for newer chips */
		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);

		/* Pre-R5xx parts: force all memory/engine clocks on while
		 * the blocks below are soft-reset; the saved values are
		 * restored afterwards. */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
						    RADEON_FORCEON_MCLKA |
						    RADEON_FORCEON_MCLKB |
						    RADEON_FORCEON_YCLKA |
						    RADEON_FORCEON_YCLKB |
						    RADEON_FORCEON_MC |
						    RADEON_FORCEON_AIC));
	}

	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);

	/* Assert soft reset on the CP and the 3D pipeline blocks, then
	 * deassert it; each write is followed by a read back (presumably
	 * to flush the posted write -- TODO confirm against register
	 * docs). */
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
					      RADEON_SOFT_RESET_CP |
					      RADEON_SOFT_RESET_HI |
					      RADEON_SOFT_RESET_SE |
					      RADEON_SOFT_RESET_RE |
					      RADEON_SOFT_RESET_PP |
					      RADEON_SOFT_RESET_E2 |
					      RADEON_SOFT_RESET_RB));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
					      ~(RADEON_SOFT_RESET_CP |
						RADEON_SOFT_RESET_HI |
						RADEON_SOFT_RESET_SE |
						RADEON_SOFT_RESET_RE |
						RADEON_SOFT_RESET_PP |
						RADEON_SOFT_RESET_E2 |
						RADEON_SOFT_RESET_RB)));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
		/* Restore the clock state saved above. */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
	}

	/* setup the raster pipes */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
		radeon_init_pipes(dev);

	/* Reset the CP ring */
	radeon_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	/* Reset any pending vertex, indirect buffers */
	radeon_freelist_reset(dev);

	return 0;
}
747
/* Program the memory controller, ring-buffer registers and scratch
 * (writeback) registers, then wait for the engine to idle.  Called from
 * both first-time init and resume paths.
 */
static void radeon_cp_init_ring_buffer(struct drm_device * dev,
				       drm_radeon_private_t *dev_priv,
				       struct drm_file *file_priv)
{
	struct drm_radeon_master_private *master_priv;
	u32 ring_start, cur_read_ptr;

	/* Initialize the memory controller. With new memory map, the fb location
	 * is not changed, it should have been properly initialized already. Part
	 * of the problem is that the code below is bogus, assuming the GART is
	 * always appended to the fb which is not necessarily the case
	 */
	if (!dev_priv->new_memmap)
		radeon_write_fb_location(dev_priv,
			     ((dev_priv->gart_vm_start - 1) & 0xffff0000)
			     | (dev_priv->fb_location >> 16));

#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP) {
		radeon_write_agp_base(dev_priv, dev->agp->base);

		radeon_write_agp_location(dev_priv,
			     (((dev_priv->gart_vm_start - 1 +
				dev_priv->gart_size) & 0xffff0000) |
			      (dev_priv->gart_vm_start >> 16)));

		/* Translate the ring map's bus offset into the card's
		 * GART address space. */
		ring_start = (dev_priv->cp_ring->offset
			      - dev->agp->base
			      + dev_priv->gart_vm_start);
	} else
#endif
		ring_start = (dev_priv->cp_ring->offset
			      - (unsigned long)dev->sg->virtual
			      + dev_priv->gart_vm_start);

	RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);

	/* Set the write pointer delay */
	RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
	SET_RING_HEAD(dev_priv, cur_read_ptr);
	dev_priv->ring.tail = cur_read_ptr;

	/* Tell the CP where to write back the ring read pointer (an
	 * address in the card's GART space). */
#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP) {
		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
			     dev_priv->ring_rptr->offset
			     - dev->agp->base + dev_priv->gart_vm_start);
	} else
#endif
	{
		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
			     dev_priv->ring_rptr->offset
			     - ((unsigned long) dev->sg->virtual)
			     + dev_priv->gart_vm_start);
	}

	/* Set ring buffer size */
#ifdef __BIG_ENDIAN
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     RADEON_BUF_SWAP_32BIT |
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#else
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#endif


	/* Initialize the scratch register pointer.  This will cause
	 * the scratch register values to be written out to memory
	 * whenever they are updated.
	 *
	 * We simply put this behind the ring read pointer, this works
	 * with PCI GART as well as (whatever kind of) AGP GART
	 */
	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
		     + RADEON_SCRATCH_REG_OFFSET);

	/* Mask 0x7: presumably enables writeback for the three scratch
	 * slots cleared below -- TODO confirm against register docs. */
	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);

	radeon_enable_bm(dev_priv);

	/* Zero the frame/dispatch/clear counters, both the register and
	 * its memory (writeback) copy. */
	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0);
	RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);

	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
	RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);

	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0);
	RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);

	/* reset sarea copies of these */
	master_priv = file_priv->master->driver_priv;
	if (master_priv->sarea_priv) {
		master_priv->sarea_priv->last_frame = 0;
		master_priv->sarea_priv->last_dispatch = 0;
		master_priv->sarea_priv->last_clear = 0;
	}

	radeon_do_wait_for_idle(dev_priv);

	/* Sync everything up */
	RADEON_WRITE(RADEON_ISYNC_CNTL,
		     (RADEON_ISYNC_ANY2D_IDLE3D |
		      RADEON_ISYNC_ANY3D_IDLE2D |
		      RADEON_ISYNC_WAIT_IDLEGUI |
		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));

}
864
865static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
866{
867 u32 tmp;
868
869 /* Start with assuming that writeback doesn't work */
870 dev_priv->writeback_works = 0;
871
872 /* Writeback doesn't seem to work everywhere, test it here and possibly
873 * enable it if it appears to work
874 */
875 radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
876
877 RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
878
879 for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
880 u32 val;
881
882 val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
883 if (val == 0xdeadbeef)
884 break;
885 DRM_UDELAY(1);
886 }
887
888 if (tmp < dev_priv->usec_timeout) {
889 dev_priv->writeback_works = 1;
890 DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
891 } else {
892 dev_priv->writeback_works = 0;
893 DRM_INFO("writeback test failed\n");
894 }
895 if (radeon_no_wb == 1) {
896 dev_priv->writeback_works = 0;
897 DRM_INFO("writeback forced off\n");
898 }
899
900 if (!dev_priv->writeback_works) {
901 /* Disable writeback to avoid unnecessary bus master transfer */
902 RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
903 RADEON_RB_NO_UPDATE);
904 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
905 }
906}
907
/* Enable or disable IGP GART on the chip (RS480-class registers, also
 * used for RS690/RS740). */
static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
{
	u32 temp;

	if (on) {
		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);

		/* RS690/RS740 additionally need the GFX D3 block bit set. */
		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
							     RS690_BLOCK_GFX_D3_EN));
		else
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);

		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
		IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
							RS480_TLB_ENABLE |
							RS480_GTW_LAC_EN |
							RS480_1LEVEL_GART));

		/* Page-table base: 4K-aligned low bits, with bits 39:32 of
		 * the bus address packed into register bits 11:4. */
		temp = dev_priv->gart_info.bus_addr & 0xfffff000;
		temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
		IGP_WRITE_MCIND(RS480_GART_BASE, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
		IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
						      RS480_REQ_TYPE_SNOOP_DIS));

		radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);

		/* Aperture is fixed at 32MB (VA_SIZE_32MB above); override
		 * whatever size userspace requested. */
		dev_priv->gart_size = 32*1024*1024;
		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
			 0xffff0000) | (dev_priv->gart_vm_start >> 16));

		radeon_write_agp_location(dev_priv, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		/* Wait for any in-flight invalidate to clear, trigger a new
		 * one, wait for it, then clear the control register.
		 * NOTE(review): these polls have no timeout -- a wedged MC
		 * would hang here indefinitely. */
		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while (1);

		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
				RS480_GART_CACHE_INVALIDATE);

		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while (1);

		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
	} else {
		/* Disable: clear GART_EN in the address-space register. */
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
	}
}
978
/* Enable or disable IGP GART on the chip (RS600 register interface) */
static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on)
{
	u32 temp;
	int i;

	if (on) {
		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);

		/* Configure the page-table walker's L2 cache/queue sizes. */
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
						    RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));

		/* Route all 19 per-client MC control registers through the
		 * translation unit. */
		for (i = 0; i < 19; i++)
			IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i,
					(RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
					 RS600_SYSTEM_ACCESS_MODE_IN_SYS |
					 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH |
					 RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
					 RS600_ENABLE_FRAGMENT_PROCESSING |
					 RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));

		/* Context 0 uses a flat (single-level) page table. */
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE |
							     RS600_PAGE_TABLE_TYPE_FLAT));

		/* disable all other contexts */
		for (i = 1; i < 8; i++)
			IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);

		/* setup the page table aperture */
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
				dev_priv->gart_info.bus_addr);
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR,
				dev_priv->gart_vm_start);
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR,
				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

		/* setup the system aperture */
		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR,
				dev_priv->gart_vm_start);
		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR,
				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));

		/* enable page tables */
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT));

		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
		IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES));

		/* invalidate the cache: toggle the invalidate bits
		 * off -> on -> off, reading the register back after each
		 * write. */
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);

		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);

		temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);

		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);

	} else {
		/* Disable the translation unit and page tables. */
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0);
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
		temp &= ~RS600_ENABLE_PAGE_TABLES;
		IGP_WRITE_MCIND(RS600_MC_CNTL1, temp);
	}
}
1054
1055static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
1056{
1057 u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
1058 if (on) {
1059
1060 DRM_DEBUG("programming pcie %08X %08lX %08X\n",
1061 dev_priv->gart_vm_start,
1062 (long)dev_priv->gart_info.bus_addr,
1063 dev_priv->gart_size);
1064 RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
1065 dev_priv->gart_vm_start);
1066 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
1067 dev_priv->gart_info.bus_addr);
1068 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
1069 dev_priv->gart_vm_start);
1070 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
1071 dev_priv->gart_vm_start +
1072 dev_priv->gart_size - 1);
1073
1074 radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
1075
1076 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
1077 RADEON_PCIE_TX_GART_EN);
1078 } else {
1079 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
1080 tmp & ~RADEON_PCIE_TX_GART_EN);
1081 }
1082}
1083
1084/* Enable or disable PCI GART on the chip */
1085static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1086{
1087 u32 tmp;
1088
1089 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
1090 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
1091 (dev_priv->flags & RADEON_IS_IGPGART)) {
1092 radeon_set_igpgart(dev_priv, on);
1093 return;
1094 }
1095
1096 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
1097 rs600_set_igpgart(dev_priv, on);
1098 return;
1099 }
1100
1101 if (dev_priv->flags & RADEON_IS_PCIE) {
1102 radeon_set_pciegart(dev_priv, on);
1103 return;
1104 }
1105
1106 tmp = RADEON_READ(RADEON_AIC_CNTL);
1107
1108 if (on) {
1109 RADEON_WRITE(RADEON_AIC_CNTL,
1110 tmp | RADEON_PCIGART_TRANSLATE_EN);
1111
1112 /* set PCI GART page-table base address
1113 */
1114 RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
1115
1116 /* set address range for PCI address translate
1117 */
1118 RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
1119 RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
1120 + dev_priv->gart_size - 1);
1121
1122 /* Turn off AGP aperture -- is this required for PCI GART?
1123 */
1124 radeon_write_agp_location(dev_priv, 0xffffffc0);
1125 RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
1126 } else {
1127 RADEON_WRITE(RADEON_AIC_CNTL,
1128 tmp & ~RADEON_PCIGART_TRANSLATE_EN);
1129 }
1130}
1131
1132static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv)
1133{
1134 struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
1135 struct radeon_virt_surface *vp;
1136 int i;
1137
1138 for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) {
1139 if (!dev_priv->virt_surfaces[i].file_priv ||
1140 dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV)
1141 break;
1142 }
1143 if (i >= 2 * RADEON_MAX_SURFACES)
1144 return -ENOMEM;
1145 vp = &dev_priv->virt_surfaces[i];
1146
1147 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1148 struct radeon_surface *sp = &dev_priv->surfaces[i];
1149 if (sp->refcount)
1150 continue;
1151
1152 vp->surface_index = i;
1153 vp->lower = gart_info->bus_addr;
1154 vp->upper = vp->lower + gart_info->table_size;
1155 vp->flags = 0;
1156 vp->file_priv = PCIGART_FILE_PRIV;
1157
1158 sp->refcount = 1;
1159 sp->lower = vp->lower;
1160 sp->upper = vp->upper;
1161 sp->flags = 0;
1162
1163 RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags);
1164 RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower);
1165 RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper);
1166 return 0;
1167 }
1168
1169 return -ENOMEM;
1170}
1171
/* One-time CP initialisation driven by the DRM_RADEON_CP_INIT ioctl:
 * validates userspace parameters, resolves all legacy maps, programs
 * the memory map and GART, loads microcode and brings up the ring.
 * Returns 0 or a negative errno; every failure path tears down via
 * radeon_do_cleanup_cp().
 */
static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
			     struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;

	DRM_DEBUG("\n");

	/* if we require new memory map but we don't have it fail */
	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Reconcile the bus-type flags with what userspace requested:
	 * an AGP card may be forced into PCI mode, and a card with no
	 * bus flag at all defaults back to AGP. */
	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
		DRM_DEBUG("Forcing AGP card to PCI mode\n");
		dev_priv->flags &= ~RADEON_IS_AGP;
	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
		   && !init->is_pci) {
		DRM_DEBUG("Restoring AGP flag\n");
		dev_priv->flags |= RADEON_IS_AGP;
	}

	/* Non-AGP operation requires a scatter/gather area for the GART. */
	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
		DRM_ERROR("PCI GART memory not allocated!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->usec_timeout = init->usec_timeout;
	if (dev_priv->usec_timeout < 1 ||
	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
		DRM_DEBUG("TIMEOUT problem!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Enable vblank on CRTC1 for older X servers
	 */
	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;

	/* Pick the microcode variant matching the requested init func. */
	switch(init->func) {
	case RADEON_INIT_R200_CP:
		dev_priv->microcode_version = UCODE_R200;
		break;
	case RADEON_INIT_R300_CP:
		dev_priv->microcode_version = UCODE_R300;
		break;
	default:
		dev_priv->microcode_version = UCODE_R100;
	}

	dev_priv->do_boxes = 0;
	dev_priv->cp_mode = init->cp_mode;

	/* We don't support anything other than bus-mastering ring mode,
	 * but the ring can be in either AGP or PCI space for the ring
	 * read pointer.
	 */
	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	switch (init->fb_bpp) {
	case 16:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
		break;
	case 32:
	default:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
		break;
	}
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	switch (init->depth_bpp) {
	case 16:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
		break;
	case 32:
	default:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
		break;
	}
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* Hardware state for depth clears.  Remove this if/when we no
	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
	 * all values to prevent unwanted 3D state from slipping through
	 * and screwing with the clear operation.
	 */
	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
					   (dev_priv->color_fmt << 10) |
					   (dev_priv->microcode_version ==
					    UCODE_R100 ? RADEON_ZBLOCK16 : 0));

	dev_priv->depth_clear.rb3d_zstencilcntl =
	    (dev_priv->depth_fmt |
	     RADEON_Z_TEST_ALWAYS |
	     RADEON_STENCIL_TEST_ALWAYS |
	     RADEON_STENCIL_S_FAIL_REPLACE |
	     RADEON_STENCIL_ZPASS_REPLACE |
	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);

	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
					 RADEON_BFACE_SOLID |
					 RADEON_FFACE_SOLID |
					 RADEON_FLAT_SHADE_VTX_LAST |
					 RADEON_DIFFUSE_SHADE_FLAT |
					 RADEON_ALPHA_SHADE_FLAT |
					 RADEON_SPECULAR_SHADE_FLAT |
					 RADEON_FOG_SHADE_FLAT |
					 RADEON_VTX_PIX_CENTER_OGL |
					 RADEON_ROUND_MODE_TRUNC |
					 RADEON_ROUND_PREC_8TH_PIX);


	dev_priv->ring_offset = init->ring_offset;
	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
	dev_priv->buffers_offset = init->buffers_offset;
	dev_priv->gart_textures_offset = init->gart_textures_offset;

	/* Resolve the map handles userspace passed in; all of them must
	 * already have been created via the legacy map ioctls. */
	master_priv->sarea = drm_legacy_getsarea(dev);
	if (!master_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset);
	if (!dev_priv->cp_ring) {
		DRM_ERROR("could not find cp ring region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset);
	if (!dev_priv->ring_rptr) {
		DRM_ERROR("could not find ring read pointer!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* GART textures are optional. */
	if (init->gart_textures_offset) {
		dev_priv->gart_textures =
		    drm_legacy_findmap(dev, init->gart_textures_offset);
		if (!dev_priv->gart_textures) {
			DRM_ERROR("could not find GART texture region!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	}

#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* AGP maps need an explicit ioremap to get CPU handles. */
		drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
		drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
		drm_legacy_ioremap_wc(dev->agp_buffer_map, dev);
		if (!dev_priv->cp_ring->handle ||
		    !dev_priv->ring_rptr->handle ||
		    !dev->agp_buffer_map->handle) {
			DRM_ERROR("could not find ioremap agp regions!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	} else
#endif
	{
		/* Non-AGP: the map offset is already a usable kernel
		 * virtual address. */
		dev_priv->cp_ring->handle =
		    (void *)(unsigned long)dev_priv->cp_ring->offset;
		dev_priv->ring_rptr->handle =
		    (void *)(unsigned long)dev_priv->ring_rptr->offset;
		dev->agp_buffer_map->handle =
		    (void *)(unsigned long)dev->agp_buffer_map->offset;

		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
			  dev_priv->cp_ring->handle);
		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
			  dev_priv->ring_rptr->handle);
		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
			  dev->agp_buffer_map->handle);
	}

	/* The FB location register packs base and top in 16-bit halves
	 * at 64K granularity; unpack both. */
	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
	dev_priv->fb_size =
		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
		- dev_priv->fb_location;

	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
					((dev_priv->front_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
				       ((dev_priv->back_offset
					 + dev_priv->fb_location) >> 10));

	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
					((dev_priv->depth_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->gart_size = init->gart_size;

	/* New let's set the memory map ... */
	if (dev_priv->new_memmap) {
		u32 base = 0;

		DRM_INFO("Setting GART location based on new memory map\n");

		/* If using AGP, try to locate the AGP aperture at the same
		 * location in the card and on the bus, though we have to
		 * align it down.
		 */
#if IS_ENABLED(CONFIG_AGP)
		if (dev_priv->flags & RADEON_IS_AGP) {
			base = dev->agp->base;
			/* Check if valid */
			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
					 dev->agp->base);
				base = 0;
			}
		}
#endif
		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
		if (base == 0) {
			base = dev_priv->fb_location + dev_priv->fb_size;
			/* On 32-bit wrap-around, fall back to below the FB. */
			if (base < dev_priv->fb_location ||
			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
				base = dev_priv->fb_location
					- dev_priv->gart_size;
		}
		dev_priv->gart_vm_start = base & 0xffc00000u;
		if (dev_priv->gart_vm_start != base)
			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
				 base, dev_priv->gart_vm_start);
	} else {
		DRM_INFO("Setting GART location based on old memory map\n");
		dev_priv->gart_vm_start = dev_priv->fb_location +
			RADEON_READ(RADEON_CONFIG_APER_SIZE);
	}

#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP)
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - dev->agp->base
						 + dev_priv->gart_vm_start);
	else
#endif
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - (unsigned long)dev->sg->virtual
						 + dev_priv->gart_vm_start);

	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
		  dev_priv->gart_buffers_offset);

	/* Ring geometry: sizes are stored as log2 of quad/oct words for
	 * the CP_RB_CNTL register programmed later. */
	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
			      + init->ring_size / sizeof(u32));
	dev_priv->ring.size = init->ring_size;
	dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);

	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
	dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);

	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
	dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;

	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		u32 sctrl;
		int ret;

		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
		/* if we have an offset set from userspace */
		if (dev_priv->pcigart_offset_set) {
			/* GART table lives in framebuffer memory. */
			dev_priv->gart_info.bus_addr =
			    (resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location;
			dev_priv->gart_info.mapping.offset =
			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
			dev_priv->gart_info.mapping.size =
			    dev_priv->gart_info.table_size;

			drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr =
			    dev_priv->gart_info.mapping.handle;

			if (dev_priv->flags & RADEON_IS_PCIE)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
			    DRM_ATI_GART_FB;

			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
				  dev_priv->gart_info.addr,
				  dev_priv->pcigart_offset);
		} else {
			/* GART table in main (system) memory; PCIE parts
			 * cannot do that. */
			if (dev_priv->flags & RADEON_IS_IGPGART)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
			    DRM_ATI_GART_MAIN;
			dev_priv->gart_info.addr = NULL;
			dev_priv->gart_info.bus_addr = 0;
			if (dev_priv->flags & RADEON_IS_PCIE) {
				DRM_ERROR
				    ("Cannot use PCI Express without GART in FB memory\n");
				radeon_do_cleanup_cp(dev);
				return -EINVAL;
			}
		}

		/* Disable surface translation while the table is set up,
		 * then restore it. */
		sctrl = RADEON_READ(RADEON_SURFACE_CNTL);
		RADEON_WRITE(RADEON_SURFACE_CNTL, 0);
		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
			ret = r600_page_table_init(dev);
		else
			ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
		RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl);

		/* NOTE(review): the init helpers appear to return nonzero
		 * on success, hence the inverted test -- confirm against
		 * drm_ati_pcigart_init(). */
		if (!ret) {
			DRM_ERROR("failed to init PCI GART!\n");
			radeon_do_cleanup_cp(dev);
			return -ENOMEM;
		}

		ret = radeon_setup_pcigart_surface(dev_priv);
		if (ret) {
			DRM_ERROR("failed to setup GART surface!\n");
			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
				r600_page_table_cleanup(dev, &dev_priv->gart_info);
			else
				drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
			radeon_do_cleanup_cp(dev);
			return ret;
		}

		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	/* Lazily fetch the CP microcode the first time through. */
	if (!dev_priv->me_fw) {
		int err = radeon_cp_init_microcode(dev_priv);
		if (err) {
			DRM_ERROR("Failed to load firmware!\n");
			radeon_do_cleanup_cp(dev);
			return err;
		}
	}
	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);

	dev_priv->last_buf = 0;

	radeon_do_engine_reset(dev);
	radeon_test_writeback(dev_priv);

	return 0;
}
1556
/* Tear down everything radeon_do_init_cp() set up: interrupts, the AGP
 * ioremaps or the PCI GART, and any FB-resident GART table mapping.
 * Every step is conditional, so this is safe to call on a partially
 * initialised device.  Always returns 0.
 */
static int radeon_do_cleanup_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

#if IS_ENABLED(CONFIG_AGP)
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Release the AGP ioremaps created during init. */
		if (dev_priv->cp_ring != NULL) {
			drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
			dev_priv->cp_ring = NULL;
		}
		if (dev_priv->ring_rptr != NULL) {
			drm_legacy_ioremapfree(dev_priv->ring_rptr, dev);
			dev_priv->ring_rptr = NULL;
		}
		if (dev->agp_buffer_map != NULL) {
			drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
			dev->agp_buffer_map = NULL;
		}
	} else
#endif
	{

		if (dev_priv->gart_info.bus_addr) {
			/* Turn off PCI GART */
			radeon_set_pcigart(dev_priv, 0);
			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
				r600_page_table_cleanup(dev, &dev_priv->gart_info);
			else {
				/* NOTE(review): the cleanup helper appears to
				 * return nonzero on success, hence the
				 * inverted test -- confirm. */
				if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
					DRM_ERROR("failed to cleanup PCI GART!\n");
			}
		}

		/* Unmap an FB-resident GART table, if one was mapped. */
		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
		{
			drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr = NULL;
		}
	}
	/* only clear to the start of flags */
	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));

	return 0;
}
1609
1610/* This code will reinit the Radeon CP hardware after a resume from disc.
1611 * AFAIK, it would be very difficult to pickle the state at suspend time, so
1612 * here we make sure that all Radeon hardware initialisation is re-done without
1613 * affecting running applications.
1614 *
1615 * Charl P. Botha <http://cpbotha.net>
1616 */
1617static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
1618{
1619 drm_radeon_private_t *dev_priv = dev->dev_private;
1620
1621 if (!dev_priv) {
1622 DRM_ERROR("Called with no initialization\n");
1623 return -EINVAL;
1624 }
1625
1626 DRM_DEBUG("Starting radeon_do_resume_cp()\n");
1627
1628#if IS_ENABLED(CONFIG_AGP)
1629 if (dev_priv->flags & RADEON_IS_AGP) {
1630 /* Turn off PCI GART */
1631 radeon_set_pcigart(dev_priv, 0);
1632 } else
1633#endif
1634 {
1635 /* Turn on PCI GART */
1636 radeon_set_pcigart(dev_priv, 1);
1637 }
1638
1639 radeon_cp_load_microcode(dev_priv);
1640 radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
1641
1642 dev_priv->have_z_offset = 0;
1643 radeon_do_engine_reset(dev);
1644 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
1645
1646 DRM_DEBUG("radeon_do_resume_cp() complete\n");
1647
1648 return 0;
1649}
1650
1651int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
1652{
1653 drm_radeon_private_t *dev_priv = dev->dev_private;
1654 drm_radeon_init_t *init = data;
1655
1656 LOCK_TEST_WITH_RETURN(dev, file_priv);
1657
1658 if (init->func == RADEON_INIT_R300_CP)
1659 r300_init_reg_flags(dev);
1660
1661 switch (init->func) {
1662 case RADEON_INIT_CP:
1663 case RADEON_INIT_R200_CP:
1664 case RADEON_INIT_R300_CP:
1665 return radeon_do_init_cp(dev, init, file_priv);
1666 case RADEON_INIT_R600_CP:
1667 return r600_do_init_cp(dev, init, file_priv);
1668 case RADEON_CLEANUP_CP:
1669 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1670 return r600_do_cleanup_cp(dev);
1671 else
1672 return radeon_do_cleanup_cp(dev);
1673 }
1674
1675 return -EINVAL;
1676}
1677
1678int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
1679{
1680 drm_radeon_private_t *dev_priv = dev->dev_private;
1681 DRM_DEBUG("\n");
1682
1683 LOCK_TEST_WITH_RETURN(dev, file_priv);
1684
1685 if (dev_priv->cp_running) {
1686 DRM_DEBUG("while CP running\n");
1687 return 0;
1688 }
1689 if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
1690 DRM_DEBUG("called with bogus CP mode (%d)\n",
1691 dev_priv->cp_mode);
1692 return 0;
1693 }
1694
1695 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1696 r600_do_cp_start(dev_priv);
1697 else
1698 radeon_do_cp_start(dev_priv);
1699
1700 return 0;
1701}
1702
1703/* Stop the CP. The engine must have been idled before calling this
1704 * routine.
1705 */
1706int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
1707{
1708 drm_radeon_private_t *dev_priv = dev->dev_private;
1709 drm_radeon_cp_stop_t *stop = data;
1710 int ret;
1711 DRM_DEBUG("\n");
1712
1713 LOCK_TEST_WITH_RETURN(dev, file_priv);
1714
1715 if (!dev_priv->cp_running)
1716 return 0;
1717
1718 /* Flush any pending CP commands. This ensures any outstanding
1719 * commands are exectuted by the engine before we turn it off.
1720 */
1721 if (stop->flush) {
1722 radeon_do_cp_flush(dev_priv);
1723 }
1724
1725 /* If we fail to make the engine go idle, we return an error
1726 * code so that the DRM ioctl wrapper can try again.
1727 */
1728 if (stop->idle) {
1729 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1730 ret = r600_do_cp_idle(dev_priv);
1731 else
1732 ret = radeon_do_cp_idle(dev_priv);
1733 if (ret)
1734 return ret;
1735 }
1736
1737 /* Finally, we can turn off the CP. If the engine isn't idle,
1738 * we will get some dropped triangles as they won't be fully
1739 * rendered before the CP is shut down.
1740 */
1741 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1742 r600_do_cp_stop(dev_priv);
1743 else
1744 radeon_do_cp_stop(dev_priv);
1745
1746 /* Reset the engine */
1747 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1748 r600_do_engine_reset(dev);
1749 else
1750 radeon_do_engine_reset(dev);
1751
1752 return 0;
1753}
1754
1755void radeon_do_release(struct drm_device * dev)
1756{
1757 drm_radeon_private_t *dev_priv = dev->dev_private;
1758 int i, ret;
1759
1760 if (dev_priv) {
1761 if (dev_priv->cp_running) {
1762 /* Stop the cp */
1763 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
1764 while ((ret = r600_do_cp_idle(dev_priv)) != 0) {
1765 DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
1766#ifdef __linux__
1767 schedule();
1768#else
1769 tsleep(&ret, PZERO, "rdnrel", 1);
1770#endif
1771 }
1772 } else {
1773 while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
1774 DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
1775#ifdef __linux__
1776 schedule();
1777#else
1778 tsleep(&ret, PZERO, "rdnrel", 1);
1779#endif
1780 }
1781 }
1782 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
1783 r600_do_cp_stop(dev_priv);
1784 r600_do_engine_reset(dev);
1785 } else {
1786 radeon_do_cp_stop(dev_priv);
1787 radeon_do_engine_reset(dev);
1788 }
1789 }
1790
1791 if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) {
1792 /* Disable *all* interrupts */
1793 if (dev_priv->mmio) /* remove this after permanent addmaps */
1794 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
1795
1796 if (dev_priv->mmio) { /* remove all surfaces */
1797 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1798 RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
1799 RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
1800 16 * i, 0);
1801 RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
1802 16 * i, 0);
1803 }
1804 }
1805 }
1806
1807 /* Free memory heap structures */
1808 radeon_mem_takedown(&(dev_priv->gart_heap));
1809 radeon_mem_takedown(&(dev_priv->fb_heap));
1810
1811 /* deallocate kernel resources */
1812 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1813 r600_do_cleanup_cp(dev);
1814 else
1815 radeon_do_cleanup_cp(dev);
1816 release_firmware(dev_priv->me_fw);
1817 dev_priv->me_fw = NULL;
1818 release_firmware(dev_priv->pfp_fw);
1819 dev_priv->pfp_fw = NULL;
1820 }
1821}
1822
1823/* Just reset the CP ring. Called as part of an X Server engine reset.
1824 */
1825int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1826{
1827 drm_radeon_private_t *dev_priv = dev->dev_private;
1828 DRM_DEBUG("\n");
1829
1830 LOCK_TEST_WITH_RETURN(dev, file_priv);
1831
1832 if (!dev_priv) {
1833 DRM_DEBUG("called before init done\n");
1834 return -EINVAL;
1835 }
1836
1837 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1838 r600_do_cp_reset(dev_priv);
1839 else
1840 radeon_do_cp_reset(dev_priv);
1841
1842 /* The CP is no longer running after an engine reset */
1843 dev_priv->cp_running = 0;
1844
1845 return 0;
1846}
1847
1848int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
1849{
1850 drm_radeon_private_t *dev_priv = dev->dev_private;
1851 DRM_DEBUG("\n");
1852
1853 LOCK_TEST_WITH_RETURN(dev, file_priv);
1854
1855 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1856 return r600_do_cp_idle(dev_priv);
1857 else
1858 return radeon_do_cp_idle(dev_priv);
1859}
1860
1861/* Added by Charl P. Botha to call radeon_do_resume_cp().
1862 */
1863int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
1864{
1865 drm_radeon_private_t *dev_priv = dev->dev_private;
1866 DRM_DEBUG("\n");
1867
1868 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1869 return r600_do_resume_cp(dev, file_priv);
1870 else
1871 return radeon_do_resume_cp(dev, file_priv);
1872}
1873
1874int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1875{
1876 drm_radeon_private_t *dev_priv = dev->dev_private;
1877 DRM_DEBUG("\n");
1878
1879 LOCK_TEST_WITH_RETURN(dev, file_priv);
1880
1881 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
1882 return r600_do_engine_reset(dev);
1883 else
1884 return radeon_do_engine_reset(dev);
1885}
1886
1887/* ================================================================
1888 * Fullscreen mode
1889 */
1890
/* KW: Deprecated to say the least:
 * Kept only so the ioctl slot keeps returning success to old clients.
 */
int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	/* intentionally a no-op */
	return 0;
}
1897
1898/* ================================================================
1899 * Freelist management
1900 */
1901
1902/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
1903 * bufs until freelist code is used. Note this hides a problem with
1904 * the scratch register * (used to keep track of last buffer
1905 * completed) being written to before * the last buffer has actually
1906 * completed rendering.
1907 *
1908 * KW: It's also a good way to find free buffers quickly.
1909 *
1910 * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
1911 * sleep. However, bugs in older versions of radeon_accel.c mean that
1912 * we essentially have to do this, else old clients will break.
1913 *
1914 * However, it does leave open a potential deadlock where all the
1915 * buffers are held by other clients, which can't release them because
1916 * they can't get the lock.
1917 */
1918
1919struct drm_buf *radeon_freelist_get(struct drm_device * dev)
1920{
1921 struct drm_device_dma *dma = dev->dma;
1922 drm_radeon_private_t *dev_priv = dev->dev_private;
1923 drm_radeon_buf_priv_t *buf_priv;
1924 struct drm_buf *buf;
1925 int i, t;
1926 int start;
1927
1928 if (++dev_priv->last_buf >= dma->buf_count)
1929 dev_priv->last_buf = 0;
1930
1931 start = dev_priv->last_buf;
1932
1933 for (t = 0; t < dev_priv->usec_timeout; t++) {
1934 u32 done_age = GET_SCRATCH(dev_priv, 1);
1935 DRM_DEBUG("done_age = %d\n", done_age);
1936 for (i = 0; i < dma->buf_count; i++) {
1937 buf = dma->buflist[start];
1938 buf_priv = buf->dev_private;
1939 if (buf->file_priv == NULL || (buf->pending &&
1940 buf_priv->age <=
1941 done_age)) {
1942 dev_priv->stats.requested_bufs++;
1943 buf->pending = 0;
1944 return buf;
1945 }
1946 if (++start >= dma->buf_count)
1947 start = 0;
1948 }
1949
1950 if (t) {
1951 DRM_UDELAY(1);
1952 dev_priv->stats.freelist_loops++;
1953 }
1954 }
1955
1956 return NULL;
1957}
1958
1959void radeon_freelist_reset(struct drm_device * dev)
1960{
1961 struct drm_device_dma *dma = dev->dma;
1962 drm_radeon_private_t *dev_priv = dev->dev_private;
1963 int i;
1964
1965 dev_priv->last_buf = 0;
1966 for (i = 0; i < dma->buf_count; i++) {
1967 struct drm_buf *buf = dma->buflist[i];
1968 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1969 buf_priv->age = 0;
1970 }
1971}
1972
1973/* ================================================================
1974 * CP command submission
1975 */
1976
1977int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
1978{
1979 drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
1980 int i;
1981 u32 last_head = GET_RING_HEAD(dev_priv);
1982
1983 for (i = 0; i < dev_priv->usec_timeout; i++) {
1984 u32 head = GET_RING_HEAD(dev_priv);
1985
1986 ring->space = (head - ring->tail) * sizeof(u32);
1987 if (ring->space <= 0)
1988 ring->space += ring->size;
1989 if (ring->space > n)
1990 return 0;
1991
1992 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
1993
1994 if (head != last_head)
1995 i = 0;
1996 last_head = head;
1997
1998 DRM_UDELAY(1);
1999 }
2000
2001 /* FIXME: This return value is ignored in the BEGIN_RING macro! */
2002#if RADEON_FIFO_DEBUG
2003 radeon_status(dev_priv);
2004 DRM_ERROR("failed!\n");
2005#endif
2006 return -EBUSY;
2007}
2008
2009static int radeon_cp_get_buffers(struct drm_device *dev,
2010 struct drm_file *file_priv,
2011 struct drm_dma * d)
2012{
2013 int i;
2014 struct drm_buf *buf;
2015
2016 for (i = d->granted_count; i < d->request_count; i++) {
2017 buf = radeon_freelist_get(dev);
2018 if (!buf)
2019 return -EBUSY; /* NOTE: broken client */
2020
2021 buf->file_priv = file_priv;
2022
2023 if (copy_to_user(&d->request_indices[i], &buf->idx,
2024 sizeof(buf->idx)))
2025 return -EFAULT;
2026 if (copy_to_user(&d->request_sizes[i], &buf->total,
2027 sizeof(buf->total)))
2028 return -EFAULT;
2029
2030 d->granted_count++;
2031 }
2032 return 0;
2033}
2034
2035int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
2036{
2037 struct drm_device_dma *dma = dev->dma;
2038 int ret = 0;
2039 struct drm_dma *d = data;
2040
2041 LOCK_TEST_WITH_RETURN(dev, file_priv);
2042
2043 /* Please don't send us buffers.
2044 */
2045 if (d->send_count != 0) {
2046 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
2047 DRM_CURRENTPID, d->send_count);
2048 return -EINVAL;
2049 }
2050
2051 /* We'll send you buffers.
2052 */
2053 if (d->request_count < 0 || d->request_count > dma->buf_count) {
2054 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
2055 DRM_CURRENTPID, d->request_count, dma->buf_count);
2056 return -EINVAL;
2057 }
2058
2059 d->granted_count = 0;
2060
2061 if (d->request_count) {
2062 ret = radeon_cp_get_buffers(dev, file_priv, d);
2063 }
2064
2065 return ret;
2066}
2067
2068int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2069{
2070 drm_radeon_private_t *dev_priv;
2071 int ret = 0;
2072
2073 dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL);
2074 if (dev_priv == NULL)
2075 return -ENOMEM;
2076
2077 dev->dev_private = (void *)dev_priv;
2078 dev_priv->flags = flags;
2079
2080 switch (flags & RADEON_FAMILY_MASK) {
2081 case CHIP_R100:
2082 case CHIP_RV200:
2083 case CHIP_R200:
2084 case CHIP_R300:
2085 case CHIP_R350:
2086 case CHIP_R420:
2087 case CHIP_R423:
2088 case CHIP_RV410:
2089 case CHIP_RV515:
2090 case CHIP_R520:
2091 case CHIP_RV570:
2092 case CHIP_R580:
2093 dev_priv->flags |= RADEON_HAS_HIERZ;
2094 break;
2095 default:
2096 /* all other chips have no hierarchical z buffer */
2097 break;
2098 }
2099
2100 pci_set_master(dev->pdev);
2101
2102 if (drm_pci_device_is_agp(dev))
2103 dev_priv->flags |= RADEON_IS_AGP;
2104 else if (pci_is_pcie(dev->pdev))
2105 dev_priv->flags |= RADEON_IS_PCIE;
2106 else
2107 dev_priv->flags |= RADEON_IS_PCI;
2108
2109 ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 2),
2110 pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
2111 _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
2112 if (ret != 0)
2113 return ret;
2114
2115 ret = drm_vblank_init(dev, 2);
2116 if (ret) {
2117 radeon_driver_unload(dev);
2118 return ret;
2119 }
2120
2121 DRM_DEBUG("%s card detected\n",
2122 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
2123 return ret;
2124}
2125
2126int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2127{
2128 struct drm_radeon_master_private *master_priv;
2129 unsigned long sareapage;
2130 int ret;
2131
2132 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
2133 if (!master_priv)
2134 return -ENOMEM;
2135
2136 /* prebuild the SAREA */
2137 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
2138 ret = drm_legacy_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
2139 &master_priv->sarea);
2140 if (ret) {
2141 DRM_ERROR("SAREA setup failed\n");
2142 kfree(master_priv);
2143 return ret;
2144 }
2145 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
2146 master_priv->sarea_priv->pfCurrentPage = 0;
2147
2148 master->driver_priv = master_priv;
2149 return 0;
2150}
2151
2152void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
2153{
2154 struct drm_radeon_master_private *master_priv = master->driver_priv;
2155
2156 if (!master_priv)
2157 return;
2158
2159 if (master_priv->sarea_priv &&
2160 master_priv->sarea_priv->pfCurrentPage != 0)
2161 radeon_cp_dispatch_flip(dev, master);
2162
2163 master_priv->sarea_priv = NULL;
2164 if (master_priv->sarea)
2165 drm_legacy_rmmap_locked(dev, master_priv->sarea);
2166
2167 kfree(master_priv);
2168
2169 master->driver_priv = NULL;
2170}
2171
2172/* Create mappings for registers and framebuffer so userland doesn't necessarily
2173 * have to find them.
2174 */
2175int radeon_driver_firstopen(struct drm_device *dev)
2176{
2177 int ret;
2178 drm_local_map_t *map;
2179 drm_radeon_private_t *dev_priv = dev->dev_private;
2180
2181 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
2182
2183 dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
2184 ret = drm_legacy_addmap(dev, dev_priv->fb_aper_offset,
2185 pci_resource_len(dev->pdev, 0),
2186 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map);
2187 if (ret != 0)
2188 return ret;
2189
2190 return 0;
2191}
2192
2193int radeon_driver_unload(struct drm_device *dev)
2194{
2195 drm_radeon_private_t *dev_priv = dev->dev_private;
2196
2197 DRM_DEBUG("\n");
2198
2199 drm_legacy_rmmap(dev, dev_priv->mmio);
2200
2201 kfree(dev_priv);
2202
2203 dev->dev_private = NULL;
2204 return 0;
2205}
2206
/* Publish queued ring work to the hardware: pad the tail out to
 * RADEON_RING_ALIGN dwords with CP_PACKET2 no-ops, wrap the tail, then
 * write the write-pointer register for the chip family.  The mb() and
 * the trailing register read keep the ring contents and the WPTR write
 * ordered/posted before the CP can fetch them.
 */
void radeon_commit_ring(drm_radeon_private_t *dev_priv)
{
	int i;
	u32 *ring;
	int tail_aligned;

	/* check if the ring is padded out to 16-dword alignment */

	tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
	if (tail_aligned) {
		/* number of filler dwords needed to reach the boundary */
		int num_p2 = RADEON_RING_ALIGN - tail_aligned;

		ring = dev_priv->ring.start;
		/* pad with some CP_PACKET2 */
		for (i = 0; i < num_p2; i++)
			ring[dev_priv->ring.tail + i] = CP_PACKET2();

		dev_priv->ring.tail += i;

		dev_priv->ring.space -= num_p2 * sizeof(u32);
	}

	/* wrap the tail back into the ring */
	dev_priv->ring.tail &= dev_priv->ring.tail_mask;

	/* make the ring writes visible before telling the CP about them */
	mb();
	GET_RING_HEAD( dev_priv );

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
		RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail);
		/* read from PCI bus to ensure correct posting */
		RADEON_READ(R600_CP_RB_RPTR);
	} else {
		RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail);
		/* read from PCI bus to ensure correct posting */
		RADEON_READ(RADEON_CP_RB_RPTR);
	}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c566993a2ec3..902b59cebac5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1150,7 +1150,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1150 } 1150 }
1151 1151
1152 if (radeon_vm_size < 1) { 1152 if (radeon_vm_size < 1) {
1153 dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n", 1153 dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
1154 radeon_vm_size); 1154 radeon_vm_size);
1155 radeon_vm_size = 4; 1155 radeon_vm_size = 4;
1156 } 1156 }
@@ -1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1744 } 1744 }
1745 1745
1746 drm_kms_helper_poll_enable(dev); 1746 drm_kms_helper_poll_enable(dev);
1747 drm_helper_hpd_irq_event(dev);
1747 1748
1748 /* set the power state here in case we are a PX system or headless */ 1749 /* set the power state here in case we are a PX system or headless */
1749 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 1750 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 744f5c49c664..df7a1719c841 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -329,7 +329,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
329 drm_kms_helper_hotplug_event(dev); 329 drm_kms_helper_hotplug_event(dev);
330} 330}
331 331
332struct drm_dp_mst_topology_cbs mst_cbs = { 332const struct drm_dp_mst_topology_cbs mst_cbs = {
333 .add_connector = radeon_dp_add_mst_connector, 333 .add_connector = radeon_dp_add_mst_connector,
334 .register_connector = radeon_dp_register_mst_connector, 334 .register_connector = radeon_dp_register_mst_connector,
335 .destroy_connector = radeon_dp_destroy_mst_connector, 335 .destroy_connector = radeon_dp_destroy_mst_connector,
@@ -525,11 +525,17 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
525 drm_mode_set_crtcinfo(adjusted_mode, 0); 525 drm_mode_set_crtcinfo(adjusted_mode, 0);
526 { 526 {
527 struct radeon_connector_atom_dig *dig_connector; 527 struct radeon_connector_atom_dig *dig_connector;
528 int ret;
528 529
529 dig_connector = mst_enc->connector->con_priv; 530 dig_connector = mst_enc->connector->con_priv;
530 dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd); 531 ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
531 dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base, 532 dig_connector->dpcd, adjusted_mode->clock,
532 dig_connector->dpcd); 533 &dig_connector->dp_lane_count,
534 &dig_connector->dp_clock);
535 if (ret) {
536 dig_connector->dp_lane_count = 0;
537 dig_connector->dp_clock = 0;
538 }
533 DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, 539 DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
534 dig_connector->dp_lane_count, dig_connector->dp_clock); 540 dig_connector->dp_lane_count, dig_connector->dp_clock);
535 } 541 }
@@ -641,7 +647,7 @@ radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
641 } 647 }
642 648
643 drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs, 649 drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
644 DRM_MODE_ENCODER_DPMST); 650 DRM_MODE_ENCODER_DPMST, NULL);
645 drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs); 651 drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);
646 652
647 mst_enc = radeon_encoder->enc_priv; 653 mst_enc = radeon_encoder->enc_priv;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 5b6a6f5b3619..e266ffc520d2 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -291,88 +291,6 @@ static struct pci_device_id pciidlist[] = {
291 291
292MODULE_DEVICE_TABLE(pci, pciidlist); 292MODULE_DEVICE_TABLE(pci, pciidlist);
293 293
294#ifdef CONFIG_DRM_RADEON_UMS
295
296static int radeon_suspend(struct drm_device *dev, pm_message_t state)
297{
298 drm_radeon_private_t *dev_priv = dev->dev_private;
299
300 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
301 return 0;
302
303 /* Disable *all* interrupts */
304 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
305 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
306 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
307 return 0;
308}
309
310static int radeon_resume(struct drm_device *dev)
311{
312 drm_radeon_private_t *dev_priv = dev->dev_private;
313
314 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
315 return 0;
316
317 /* Restore interrupt registers */
318 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
319 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
320 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
321 return 0;
322}
323
324
325static const struct file_operations radeon_driver_old_fops = {
326 .owner = THIS_MODULE,
327 .open = drm_open,
328 .release = drm_release,
329 .unlocked_ioctl = drm_ioctl,
330 .mmap = drm_legacy_mmap,
331 .poll = drm_poll,
332 .read = drm_read,
333#ifdef CONFIG_COMPAT
334 .compat_ioctl = radeon_compat_ioctl,
335#endif
336 .llseek = noop_llseek,
337};
338
339static struct drm_driver driver_old = {
340 .driver_features =
341 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
342 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
343 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
344 .load = radeon_driver_load,
345 .firstopen = radeon_driver_firstopen,
346 .open = radeon_driver_open,
347 .preclose = radeon_driver_preclose,
348 .postclose = radeon_driver_postclose,
349 .lastclose = radeon_driver_lastclose,
350 .set_busid = drm_pci_set_busid,
351 .unload = radeon_driver_unload,
352 .suspend = radeon_suspend,
353 .resume = radeon_resume,
354 .get_vblank_counter = radeon_get_vblank_counter,
355 .enable_vblank = radeon_enable_vblank,
356 .disable_vblank = radeon_disable_vblank,
357 .master_create = radeon_master_create,
358 .master_destroy = radeon_master_destroy,
359 .irq_preinstall = radeon_driver_irq_preinstall,
360 .irq_postinstall = radeon_driver_irq_postinstall,
361 .irq_uninstall = radeon_driver_irq_uninstall,
362 .irq_handler = radeon_driver_irq_handler,
363 .ioctls = radeon_ioctls,
364 .dma_ioctl = radeon_cp_buffers,
365 .fops = &radeon_driver_old_fops,
366 .name = DRIVER_NAME,
367 .desc = DRIVER_DESC,
368 .date = DRIVER_DATE,
369 .major = DRIVER_MAJOR,
370 .minor = DRIVER_MINOR,
371 .patchlevel = DRIVER_PATCHLEVEL,
372};
373
374#endif
375
376static struct drm_driver kms_driver; 294static struct drm_driver kms_driver;
377 295
378static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) 296static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -619,13 +537,6 @@ static struct drm_driver kms_driver = {
619static struct drm_driver *driver; 537static struct drm_driver *driver;
620static struct pci_driver *pdriver; 538static struct pci_driver *pdriver;
621 539
622#ifdef CONFIG_DRM_RADEON_UMS
623static struct pci_driver radeon_pci_driver = {
624 .name = DRIVER_NAME,
625 .id_table = pciidlist,
626};
627#endif
628
629static struct pci_driver radeon_kms_pci_driver = { 540static struct pci_driver radeon_kms_pci_driver = {
630 .name = DRIVER_NAME, 541 .name = DRIVER_NAME,
631 .id_table = pciidlist, 542 .id_table = pciidlist,
@@ -655,16 +566,8 @@ static int __init radeon_init(void)
655 radeon_register_atpx_handler(); 566 radeon_register_atpx_handler();
656 567
657 } else { 568 } else {
658#ifdef CONFIG_DRM_RADEON_UMS
659 DRM_INFO("radeon userspace modesetting enabled.\n");
660 driver = &driver_old;
661 pdriver = &radeon_pci_driver;
662 driver->driver_features &= ~DRIVER_MODESET;
663 driver->num_ioctls = radeon_max_ioctl;
664#else
665 DRM_ERROR("No UMS support in radeon module!\n"); 569 DRM_ERROR("No UMS support in radeon module!\n");
666 return -EINVAL; 570 return -EINVAL;
667#endif
668 } 571 }
669 572
670 radeon_kfd_init(); 573 radeon_kfd_init();
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 0caafc7a6e17..afef2d9fccd8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -119,2052 +119,4 @@
119long radeon_drm_ioctl(struct file *filp, 119long radeon_drm_ioctl(struct file *filp,
120 unsigned int cmd, unsigned long arg); 120 unsigned int cmd, unsigned long arg);
121 121
122/* The rest of the file is DEPRECATED! */
123#ifdef CONFIG_DRM_RADEON_UMS
124
125enum radeon_cp_microcode_version {
126 UCODE_R100,
127 UCODE_R200,
128 UCODE_R300,
129};
130
131typedef struct drm_radeon_freelist {
132 unsigned int age;
133 struct drm_buf *buf;
134 struct drm_radeon_freelist *next;
135 struct drm_radeon_freelist *prev;
136} drm_radeon_freelist_t;
137
138typedef struct drm_radeon_ring_buffer {
139 u32 *start;
140 u32 *end;
141 int size;
142 int size_l2qw;
143
144 int rptr_update; /* Double Words */
145 int rptr_update_l2qw; /* log2 Quad Words */
146
147 int fetch_size; /* Double Words */
148 int fetch_size_l2ow; /* log2 Oct Words */
149
150 u32 tail;
151 u32 tail_mask;
152 int space;
153
154 int high_mark;
155} drm_radeon_ring_buffer_t;
156
157typedef struct drm_radeon_depth_clear_t {
158 u32 rb3d_cntl;
159 u32 rb3d_zstencilcntl;
160 u32 se_cntl;
161} drm_radeon_depth_clear_t;
162
163struct drm_radeon_driver_file_fields {
164 int64_t radeon_fb_delta;
165};
166
167struct mem_block {
168 struct mem_block *next;
169 struct mem_block *prev;
170 int start;
171 int size;
172 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
173};
174
175struct radeon_surface {
176 int refcount;
177 u32 lower;
178 u32 upper;
179 u32 flags;
180};
181
182struct radeon_virt_surface {
183 int surface_index;
184 u32 lower;
185 u32 upper;
186 u32 flags;
187 struct drm_file *file_priv;
188#define PCIGART_FILE_PRIV ((void *) -1L)
189};
190
191#define RADEON_FLUSH_EMITED (1 << 0)
192#define RADEON_PURGE_EMITED (1 << 1)
193
194struct drm_radeon_master_private {
195 drm_local_map_t *sarea;
196 drm_radeon_sarea_t *sarea_priv;
197};
198
199typedef struct drm_radeon_private {
200 drm_radeon_ring_buffer_t ring;
201
202 u32 fb_location;
203 u32 fb_size;
204 int new_memmap;
205
206 int gart_size;
207 u32 gart_vm_start;
208 unsigned long gart_buffers_offset;
209
210 int cp_mode;
211 int cp_running;
212
213 drm_radeon_freelist_t *head;
214 drm_radeon_freelist_t *tail;
215 int last_buf;
216 int writeback_works;
217
218 int usec_timeout;
219
220 int microcode_version;
221
222 struct {
223 u32 boxes;
224 int freelist_timeouts;
225 int freelist_loops;
226 int requested_bufs;
227 int last_frame_reads;
228 int last_clear_reads;
229 int clears;
230 int texture_uploads;
231 } stats;
232
233 int do_boxes;
234 int page_flipping;
235
236 u32 color_fmt;
237 unsigned int front_offset;
238 unsigned int front_pitch;
239 unsigned int back_offset;
240 unsigned int back_pitch;
241
242 u32 depth_fmt;
243 unsigned int depth_offset;
244 unsigned int depth_pitch;
245
246 u32 front_pitch_offset;
247 u32 back_pitch_offset;
248 u32 depth_pitch_offset;
249
250 drm_radeon_depth_clear_t depth_clear;
251
252 unsigned long ring_offset;
253 unsigned long ring_rptr_offset;
254 unsigned long buffers_offset;
255 unsigned long gart_textures_offset;
256
257 drm_local_map_t *sarea;
258 drm_local_map_t *cp_ring;
259 drm_local_map_t *ring_rptr;
260 drm_local_map_t *gart_textures;
261
262 struct mem_block *gart_heap;
263 struct mem_block *fb_heap;
264
265 /* SW interrupt */
266 wait_queue_head_t swi_queue;
267 atomic_t swi_emitted;
268 int vblank_crtc;
269 uint32_t irq_enable_reg;
270 uint32_t r500_disp_irq_reg;
271
272 struct radeon_surface surfaces[RADEON_MAX_SURFACES];
273 struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
274
275 unsigned long pcigart_offset;
276 unsigned int pcigart_offset_set;
277 struct drm_ati_pcigart_info gart_info;
278
279 u32 scratch_ages[5];
280
281 int have_z_offset;
282
283 /* starting from here on, data is preserved across an open */
284 uint32_t flags; /* see radeon_chip_flags */
285 resource_size_t fb_aper_offset;
286
287 int num_gb_pipes;
288 int num_z_pipes;
289 int track_flush;
290 drm_local_map_t *mmio;
291
292 /* r6xx/r7xx pipe/shader config */
293 int r600_max_pipes;
294 int r600_max_tile_pipes;
295 int r600_max_simds;
296 int r600_max_backends;
297 int r600_max_gprs;
298 int r600_max_threads;
299 int r600_max_stack_entries;
300 int r600_max_hw_contexts;
301 int r600_max_gs_threads;
302 int r600_sx_max_export_size;
303 int r600_sx_max_export_pos_size;
304 int r600_sx_max_export_smx_size;
305 int r600_sq_num_cf_insts;
306 int r700_sx_num_of_sets;
307 int r700_sc_prim_fifo_size;
308 int r700_sc_hiz_tile_fifo_size;
309 int r700_sc_earlyz_tile_fifo_fize;
310 int r600_group_size;
311 int r600_npipes;
312 int r600_nbanks;
313
314 struct mutex cs_mutex;
315 u32 cs_id_scnt;
316 u32 cs_id_wcnt;
317 /* r6xx/r7xx drm blit vertex buffer */
318 struct drm_buf *blit_vb;
319
320 /* firmware */
321 const struct firmware *me_fw, *pfp_fw;
322} drm_radeon_private_t;
323
324typedef struct drm_radeon_buf_priv {
325 u32 age;
326} drm_radeon_buf_priv_t;
327
328struct drm_buffer;
329
330typedef struct drm_radeon_kcmd_buffer {
331 int bufsz;
332 struct drm_buffer *buffer;
333 int nbox;
334 struct drm_clip_rect __user *boxes;
335} drm_radeon_kcmd_buffer_t;
336
337extern int radeon_no_wb;
338extern struct drm_ioctl_desc radeon_ioctls[];
339extern int radeon_max_ioctl;
340
341extern u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv);
342extern void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val);
343
344#define GET_RING_HEAD(dev_priv) radeon_get_ring_head(dev_priv)
345#define SET_RING_HEAD(dev_priv, val) radeon_set_ring_head(dev_priv, val)
346
347/* Check whether the given hardware address is inside the framebuffer or the
348 * GART area.
349 */
350static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
351 u64 off)
352{
353 u32 fb_start = dev_priv->fb_location;
354 u32 fb_end = fb_start + dev_priv->fb_size - 1;
355 u32 gart_start = dev_priv->gart_vm_start;
356 u32 gart_end = gart_start + dev_priv->gart_size - 1;
357
358 return ((off >= fb_start && off <= fb_end) ||
359 (off >= gart_start && off <= gart_end));
360}
361
362/* radeon_state.c */
363extern void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf);
364
365 /* radeon_cp.c */
366extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
367extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
368extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
369extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
370extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
371extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
372extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
373extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
374extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
375extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
376extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
377extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
378
379extern void radeon_freelist_reset(struct drm_device * dev);
380extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
381
382extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
383
384extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
385
386extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
387extern int radeon_presetup(struct drm_device *dev);
388extern int radeon_driver_postcleanup(struct drm_device *dev);
389
390extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
391extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
392extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
393extern void radeon_mem_takedown(struct mem_block **heap);
394extern void radeon_mem_release(struct drm_file *file_priv,
395 struct mem_block *heap);
396
397extern void radeon_enable_bm(struct drm_radeon_private *dev_priv);
398extern u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off);
399extern void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val);
400
				/* radeon_irq.c */
/* Interrupt handling: ioctls, vblank counter/enable hooks, and the
 * standard DRM irq install/uninstall callbacks. */
extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);

extern void radeon_do_release(struct drm_device * dev);
extern u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
extern int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe);
extern void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe);
extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg);
extern void radeon_driver_irq_preinstall(struct drm_device * dev);
extern int radeon_driver_irq_postinstall(struct drm_device *dev);
extern void radeon_driver_irq_uninstall(struct drm_device * dev);
extern void radeon_enable_interrupt(struct drm_device *dev);
extern int radeon_vblank_crtc_get(struct drm_device *dev);
extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);

/* DRM driver lifecycle callbacks (load/unload, per-file open/close). */
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
extern int radeon_driver_firstopen(struct drm_device *dev);
extern void radeon_driver_preclose(struct drm_device *dev,
				   struct drm_file *file_priv);
extern void radeon_driver_postclose(struct drm_device *dev,
				    struct drm_file *file_priv);
extern void radeon_driver_lastclose(struct drm_device * dev);
extern int radeon_driver_open(struct drm_device *dev,
			      struct drm_file *file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg);

/* Per-master (per-X-server) state and page-flip dispatch. */
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master);
/* r300_cmdbuf.c */
/* R300-family command buffer validation/submission. */
extern void r300_init_reg_flags(struct drm_device *dev);

extern int r300_do_cp_cmdbuf(struct drm_device *dev,
			     struct drm_file *file_priv,
			     drm_radeon_kcmd_buffer_t *cmdbuf);

/* r600_cp.c */
/* R600-family CP setup/reset and indirect-buffer dispatch; R600 has a
 * different CP/ring programming model than earlier ASICs. */
extern int r600_do_engine_reset(struct drm_device *dev);
extern int r600_do_cleanup_cp(struct drm_device *dev);
extern int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
			   struct drm_file *file_priv);
extern int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv);
extern int r600_do_cp_idle(drm_radeon_private_t *dev_priv);
extern void r600_do_cp_start(drm_radeon_private_t *dev_priv);
extern void r600_do_cp_reset(drm_radeon_private_t *dev_priv);
extern void r600_do_cp_stop(drm_radeon_private_t *dev_priv);
extern int r600_cp_dispatch_indirect(struct drm_device *dev,
				     struct drm_buf *buf, int start, int end);
extern int r600_page_table_init(struct drm_device *dev);
extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
extern int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
extern void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv);
extern int r600_cp_dispatch_texture(struct drm_device *dev,
				    struct drm_file *file_priv,
				    drm_radeon_texture_t *tex,
				    drm_radeon_tex_image_t *image);
/* r600_blit.c */
/* GPU-accelerated blits used for buffer swaps and texture uploads. */
extern int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv);
extern void r600_done_blit_copy(struct drm_device *dev);
extern void r600_blit_copy(struct drm_device *dev,
			   uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
			   int size_bytes);
extern void r600_blit_swap(struct drm_device *dev,
			   uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
			   int sx, int sy, int dx, int dy,
			   int w, int h, int src_pitch, int dst_pitch, int cpp);
471
/* Flags for stats.boxes
 * (bitmask of conditions observed while submitting commands; used for
 * performance-box debugging overlays)
 */
#define RADEON_BOX_DMA_IDLE      0x1
#define RADEON_BOX_RING_FULL     0x2
#define RADEON_BOX_FLIP          0x4
#define RADEON_BOX_WAIT_IDLE     0x8
#define RADEON_BOX_TEXTURE_LOAD  0x10
479
/* Register definitions, register access macros and drmAddMap constants
 * for Radeon kernel driver.
 */
/* MMIO index/data pair for indirect register access. */
#define RADEON_MM_INDEX                 0x0000
#define RADEON_MM_DATA                  0x0004

#define RADEON_AGP_COMMAND              0x0f60
#define RADEON_AGP_COMMAND_PCI_CONFIG   0x0060	/* offset in PCI config */
#	define RADEON_AGP_ENABLE	(1<<8)
#define RADEON_AUX_SCISSOR_CNTL         0x26f0
#	define RADEON_EXCLUSIVE_SCISSOR_0	(1 << 24)
#	define RADEON_EXCLUSIVE_SCISSOR_1	(1 << 25)
#	define RADEON_EXCLUSIVE_SCISSOR_2	(1 << 26)
#	define RADEON_SCISSOR_0_ENABLE		(1 << 28)
#	define RADEON_SCISSOR_1_ENABLE		(1 << 29)
#	define RADEON_SCISSOR_2_ENABLE		(1 << 30)

/*
 * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
 * don't have an explicit bus mastering disable bit.  It's handled
 * by the PCI D-states.  PMI_BM_DIS disables D-state bus master
 * handling, not bus mastering itself.
 */
#define RADEON_BUS_CNTL			0x0030
/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
#	define RADEON_BUS_MASTER_DIS		(1 << 6)
/* rs600/rs690/rs740 */
#	define RS600_BUS_MASTER_DIS		(1 << 14)
#	define RS600_MSI_REARM		        (1 << 20)
/* see RS400_MSI_REARM in AIC_CNTL for rs480 */

#define RADEON_BUS_CNTL1		0x0034
#	define RADEON_PMI_BM_DIS		(1 << 2)
#	define RADEON_PMI_INT_DIS		(1 << 3)

#define RV370_BUS_CNTL			0x004c
#	define RV370_PMI_BM_DIS		        (1 << 5)
#	define RV370_PMI_INT_DIS		(1 << 6)

#define RADEON_MSI_REARM_EN		0x0160
/* rv370/rv380, rv410, r423/r430/r480, r5xx */
#	define RV370_MSI_REARM_EN		(1 << 0)

/* PLL access is indirect: write index to CLOCK_CNTL_INDEX (with
 * PLL_WR_EN for writes), then read/write CLOCK_CNTL_DATA. */
#define RADEON_CLOCK_CNTL_DATA		0x000c
#	define RADEON_PLL_WR_EN			(1 << 7)
#define RADEON_CLOCK_CNTL_INDEX		0x0008
#define RADEON_CONFIG_APER_SIZE		0x0108
#define RADEON_CONFIG_MEMSIZE		0x00f8
#define RADEON_CRTC_OFFSET		0x0224
#define RADEON_CRTC_OFFSET_CNTL		0x0228
#	define RADEON_CRTC_TILE_EN		(1 << 15)
#	define RADEON_CRTC_OFFSET_FLIP_CNTL	(1 << 16)
#define RADEON_CRTC2_OFFSET		0x0324
#define RADEON_CRTC2_OFFSET_CNTL	0x0328
534
/* PCIE indirect register pair.  NOTE(review): these share MMIO offsets
 * 0x0030/0x0034 with RADEON_BUS_CNTL/RADEON_BUS_CNTL1 above — which
 * meaning applies is ASIC-dependent; the small values below (0x10-0x17)
 * are PCIE-indirect indices, not MMIO offsets. */
#define RADEON_PCIE_INDEX               0x0030
#define RADEON_PCIE_DATA                0x0034
#define RADEON_PCIE_TX_GART_CNTL	0x10
#	define RADEON_PCIE_TX_GART_EN		(1 << 0)
#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
#	define RADEON_PCIE_TX_GART_MODE_32_128_CACHE	(0 << 3)
#	define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE	(1 << 3)
#	define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
#	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1 << 8)
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
#define RADEON_PCIE_TX_GART_BASE	0x13
#define RADEON_PCIE_TX_GART_START_LO	0x14
#define RADEON_PCIE_TX_GART_START_HI	0x15
#define RADEON_PCIE_TX_GART_END_LO	0x16
#define RADEON_PCIE_TX_GART_END_HI	0x17
553
/* Northbridge MC index/data pairs (IGP chipsets); the *_WR_EN bit in the
 * index selects a write cycle vs. a read cycle. */
#define RS480_NB_MC_INDEX               0x168
#	define RS480_NB_MC_IND_WR_EN	(1 << 8)
#define RS480_NB_MC_DATA                0x16c

#define RS690_MC_INDEX                  0x78
#   define RS690_MC_INDEX_MASK          0x1ff
#   define RS690_MC_INDEX_WR_EN         (1 << 9)
#   define RS690_MC_INDEX_WR_ACK        0x7f
#define RS690_MC_DATA                   0x7c

/* MC indirect registers */
#define RS480_MC_MISC_CNTL              0x18
#	define RS480_DISABLE_GTW	(1 << 1)
/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
#	define RS480_GART_INDEX_REG_EN	(1 << 12)
#	define RS690_BLOCK_GFX_D3_EN	(1 << 14)
#define RS480_K8_FB_LOCATION            0x1e
#define RS480_GART_FEATURE_ID           0x2b
#	define RS480_HANG_EN	        (1 << 11)
#	define RS480_TLB_ENABLE	        (1 << 18)
#	define RS480_P2P_ENABLE	        (1 << 19)
#	define RS480_GTW_LAC_EN	        (1 << 25)
#	define RS480_2LEVEL_GART	(0 << 30)
#	define RS480_1LEVEL_GART	(1 << 30)
#	define RS480_PDC_EN	        (1 << 31)
#define RS480_GART_BASE                 0x2c
#define RS480_GART_CACHE_CNTRL          0x2e
#	define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
#	define RS480_GART_EN	        (1 << 0)
#	define RS480_VA_SIZE_32MB	(0 << 1)
#	define RS480_VA_SIZE_64MB	(1 << 1)
#	define RS480_VA_SIZE_128MB	(2 << 1)
#	define RS480_VA_SIZE_256MB	(3 << 1)
#	define RS480_VA_SIZE_512MB	(4 << 1)
#	define RS480_VA_SIZE_1GB	(5 << 1)
#	define RS480_VA_SIZE_2GB	(6 << 1)
#define RS480_AGP_MODE_CNTL             0x39
#	define RS480_POST_GART_Q_SIZE	(1 << 18)
#	define RS480_NONGART_SNOOP	(1 << 19)
#	define RS480_AGP_RD_BUF_SIZE	(1 << 20)
#	define RS480_REQ_TYPE_SNOOP_SHIFT 22
#	define RS480_REQ_TYPE_SNOOP_MASK  0x3
#	define RS480_REQ_TYPE_SNOOP_DIS	(1 << 24)
#define RS480_MC_MISC_UMA_CNTL          0x5f
#define RS480_MC_MCLK_CNTL              0x7a
#define RS480_MC_UMA_DUALCH_CNTL        0x86

#define RS690_MC_FB_LOCATION            0x100
#define RS690_MC_AGP_LOCATION           0x101
#define RS690_MC_AGP_BASE               0x102
#define RS690_MC_AGP_BASE_2             0x103
606
/* RS600 MC index/data pair; the IND_SEQ_RBS/CITF bits select which MC
 * client block the indirect access targets. */
#define RS600_MC_INDEX                  0x70
#       define RS600_MC_ADDR_MASK       0xffff
#       define RS600_MC_IND_SEQ_RBS_0   (1 << 16)
#       define RS600_MC_IND_SEQ_RBS_1   (1 << 17)
#       define RS600_MC_IND_SEQ_RBS_2   (1 << 18)
#       define RS600_MC_IND_SEQ_RBS_3   (1 << 19)
#       define RS600_MC_IND_AIC_RBS     (1 << 20)
#       define RS600_MC_IND_CITF_ARB0   (1 << 21)
#       define RS600_MC_IND_CITF_ARB1   (1 << 22)
#       define RS600_MC_IND_WR_EN       (1 << 23)
#define RS600_MC_DATA                   0x74

/* RS600 MC-indirect registers (indices for the pair above), including
 * the RS600 GART page-table controls. */
#define RS600_MC_STATUS                 0x0
#       define RS600_MC_IDLE            (1 << 1)
#define RS600_MC_FB_LOCATION            0x4
#define RS600_MC_AGP_LOCATION           0x5
#define RS600_AGP_BASE                  0x6
#define RS600_AGP_BASE_2                0x7
#define RS600_MC_CNTL1                  0x9
#       define RS600_ENABLE_PAGE_TABLES (1 << 26)
#define RS600_MC_PT0_CNTL               0x100
#       define RS600_ENABLE_PT          (1 << 0)
#       define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
#       define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
#       define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28)
#       define RS600_INVALIDATE_L2_CACHE    (1 << 29)
#define RS600_MC_PT0_CONTEXT0_CNTL      0x102
#       define RS600_ENABLE_PAGE_TABLE  (1 << 0)
#       define RS600_PAGE_TABLE_TYPE_FLAT   (0 << 1)
#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR   0x112
#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR  0x114
#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR    0x12c
#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR   0x13c
#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR     0x14c
#define RS600_MC_PT0_CLIENT0_CNTL       0x16c
#       define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE       (1 << 0)
#       define RS600_TRANSLATION_MODE_OVERRIDE              (1 << 1)
#       define RS600_SYSTEM_ACCESS_MODE_MASK                (3 << 8)
#       define RS600_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 8)
#       define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 8)
#       define RS600_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 8)
#       define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 8)
#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH  (0 << 10)
#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10)
#       define RS600_EFFECTIVE_L1_CACHE_SIZE(x)             ((x) << 11)
#       define RS600_ENABLE_FRAGMENT_PROCESSING             (1 << 14)
#       define RS600_EFFECTIVE_L1_QUEUE_SIZE(x)             ((x) << 15)
#       define RS600_INVALIDATE_L1_TLB                      (1 << 20)

/* R520/RV515 MC index/data pair and MC-indirect register indices. */
#define R520_MC_IND_INDEX 0x70
#define R520_MC_IND_WR_EN (1 << 24)
#define R520_MC_IND_DATA  0x74

#define RV515_MC_FB_LOCATION 0x01
#define RV515_MC_AGP_LOCATION 0x02
#define RV515_MC_AGP_BASE 0x03
#define RV515_MC_AGP_BASE_2 0x04

#define R520_MC_FB_LOCATION 0x04
#define R520_MC_AGP_LOCATION 0x05
#define R520_MC_AGP_BASE 0x06
#define R520_MC_AGP_BASE_2 0x07
670
/* Memory controller / SDRAM mode and AGP aperture base registers. */
#define RADEON_MPP_TB_CONFIG		0x01c0
#define RADEON_MEM_CNTL			0x0140
#define RADEON_MEM_SDRAM_MODE_REG	0x0158
#define RADEON_AGP_BASE_2		0x015c /* r200+ only */
#define RS480_AGP_BASE_2		0x0164
#define RADEON_AGP_BASE			0x0170

/* pipe config regs */
#define R400_GB_PIPE_SELECT             0x402c
#define RV530_GB_PIPE_SELECT2           0x4124
#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
#define R300_GB_TILE_CONFIG             0x4018
#       define R300_ENABLE_TILING       (1 << 0)
#       define R300_PIPE_COUNT_RV350    (0 << 1)
#       define R300_PIPE_COUNT_R300     (3 << 1)
#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
#       define R300_PIPE_COUNT_R420     (7 << 1)
#       define R300_TILE_SIZE_8         (0 << 4)
#       define R300_TILE_SIZE_16        (1 << 4)
#       define R300_TILE_SIZE_32        (2 << 4)
#       define R300_SUBPIXEL_1_12       (0 << 16)
#       define R300_SUBPIXEL_1_16       (1 << 16)
#define R300_DST_PIPE_CONFIG            0x170c
#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
#define R300_RB2D_DSTCACHE_MODE         0x3428
#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)

#define RADEON_RB3D_COLOROFFSET		0x1c40
#define RADEON_RB3D_COLORPITCH		0x1c48

#define	RADEON_SRC_X_Y			0x1590

/* 2D engine master control — GMC_* fields select brush/src/dst data
 * types and raster ops for blits. */
#define RADEON_DP_GUI_MASTER_CNTL	0x146c
#	define RADEON_GMC_SRC_PITCH_OFFSET_CNTL	(1 << 0)
#	define RADEON_GMC_DST_PITCH_OFFSET_CNTL	(1 << 1)
#	define RADEON_GMC_BRUSH_SOLID_COLOR	(13 << 4)
#	define RADEON_GMC_BRUSH_NONE		(15 << 4)
#	define RADEON_GMC_DST_16BPP		(4 << 8)
#	define RADEON_GMC_DST_24BPP		(5 << 8)
#	define RADEON_GMC_DST_32BPP		(6 << 8)
#	define RADEON_GMC_DST_DATATYPE_SHIFT	8
#	define RADEON_GMC_SRC_DATATYPE_COLOR	(3 << 12)
#	define RADEON_DP_SRC_SOURCE_MEMORY	(2 << 24)
#	define RADEON_DP_SRC_SOURCE_HOST_DATA	(3 << 24)
#	define RADEON_GMC_CLR_CMP_CNTL_DIS	(1 << 28)
#	define RADEON_GMC_WR_MSK_DIS		(1 << 30)
#	define RADEON_ROP3_S			0x00cc0000	/* ROP3: copy source */
#	define RADEON_ROP3_P			0x00f00000	/* ROP3: copy pattern */
#define RADEON_DP_WRITE_MASK		0x16cc
#define RADEON_SRC_PITCH_OFFSET		0x1428
#define RADEON_DST_PITCH_OFFSET		0x142c
#define RADEON_DST_PITCH_OFFSET_C	0x1c80
#	define RADEON_DST_TILE_LINEAR		(0 << 30)
#	define RADEON_DST_TILE_MACRO		(1 << 30)
#	define RADEON_DST_TILE_MICRO		(2 << 30)
#	define RADEON_DST_TILE_BOTH		(3 << 30)
728
/* CP scratch registers — written by the GPU to report progress (fence
 * values etc.); SCRATCH_UMSK/SCRATCH_ADDR control write-back to memory. */
#define RADEON_SCRATCH_REG0		0x15e0
#define RADEON_SCRATCH_REG1		0x15e4
#define RADEON_SCRATCH_REG2		0x15e8
#define RADEON_SCRATCH_REG3		0x15ec
#define RADEON_SCRATCH_REG4		0x15f0
#define RADEON_SCRATCH_REG5		0x15f4
#define RADEON_SCRATCH_UMSK		0x0770
#define RADEON_SCRATCH_ADDR		0x0774

/* Byte offset of scratch slot x in the write-back area.
 * NOTE(review): RADEON_SCRATCH_REG_OFFSET is defined elsewhere in this
 * header (outside this chunk). */
#define RADEON_SCRATCHOFF( x )		(RADEON_SCRATCH_REG_OFFSET + 4*(x))

extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);

#define GET_SCRATCH(dev_priv, x) radeon_get_scratch(dev_priv, x)

/* R600-family scratch registers (different MMIO block). */
#define R600_SCRATCH_REG0		0x8500
#define R600_SCRATCH_REG1		0x8504
#define R600_SCRATCH_REG2		0x8508
#define R600_SCRATCH_REG3		0x850c
#define R600_SCRATCH_REG4		0x8510
#define R600_SCRATCH_REG5		0x8514
#define R600_SCRATCH_REG6		0x8518
#define R600_SCRATCH_REG7		0x851c
#define R600_SCRATCH_UMSK		0x8540
#define R600_SCRATCH_ADDR		0x8544

#define R600_SCRATCHOFF(x)		(R600_SCRATCH_REG_OFFSET + 4*(x))
756
/* Interrupt enable register. */
#define RADEON_GEN_INT_CNTL		0x0040
#	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
#	define RADEON_GUI_IDLE_INT_ENABLE	(1 << 19)
#	define RADEON_SW_INT_ENABLE		(1 << 25)

/* Interrupt status; writing the *_ACK bits (same positions as the
 * status bits) acknowledges the interrupt. */
#define RADEON_GEN_INT_STATUS		0x0044
#	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
#	define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)
#	define RADEON_SW_INT_TEST		(1 << 25)
#	define RADEON_SW_INT_TEST_ACK		(1 << 25)
#	define RADEON_SW_INT_FIRE		(1 << 26)
#       define R500_DISPLAY_INT_STATUS          (1 << 0)

/* Host data path control (HDP soft reset / write-combine timeout). */
#define RADEON_HOST_PATH_CNTL		0x0130
#	define RADEON_HDP_SOFT_RESET		(1 << 26)
#	define RADEON_HDP_WC_TIMEOUT_MASK	(7 << 28)
#	define RADEON_HDP_WC_TIMEOUT_28BCLK	(7 << 28)

/* 2D/3D engine synchronization policy. */
#define RADEON_ISYNC_CNTL		0x1724
#	define RADEON_ISYNC_ANY2D_IDLE3D	(1 << 0)
#	define RADEON_ISYNC_ANY3D_IDLE2D	(1 << 1)
#	define RADEON_ISYNC_TRIG2D_IDLE3D	(1 << 2)
#	define RADEON_ISYNC_TRIG3D_IDLE2D	(1 << 3)
#	define RADEON_ISYNC_WAIT_IDLEGUI	(1 << 4)
#	define RADEON_ISYNC_CPSCRATCH_IDLEGUI	(1 << 5)

/* Host-data byte-swap mode for big-endian hosts. */
#define RADEON_RBBM_GUICNTL		0x172c
#	define RADEON_HOST_DATA_SWAP_NONE	(0 << 0)
#	define RADEON_HOST_DATA_SWAP_16BIT	(1 << 0)
#	define RADEON_HOST_DATA_SWAP_32BIT	(2 << 0)
#	define RADEON_HOST_DATA_SWAP_HDW	(3 << 0)

#define RADEON_MC_AGP_LOCATION		0x014c
#define RADEON_MC_FB_LOCATION		0x0148
#define RADEON_MCLK_CNTL		0x0012
#	define RADEON_FORCEON_MCLKA		(1 << 16)
#	define RADEON_FORCEON_MCLKB		(1 << 17)
#	define RADEON_FORCEON_YCLKA		(1 << 18)
#	define RADEON_FORCEON_YCLKB		(1 << 19)
#	define RADEON_FORCEON_MC		(1 << 20)
#	define RADEON_FORCEON_AIC		(1 << 21)
802
/* Pixel pipe (texture unit) registers. */
#define RADEON_PP_BORDER_COLOR_0	0x1d40
#define RADEON_PP_BORDER_COLOR_1	0x1d44
#define RADEON_PP_BORDER_COLOR_2	0x1d48
#define RADEON_PP_CNTL			0x1c38
#	define RADEON_SCISSOR_ENABLE		(1 <<  1)
#define RADEON_PP_LUM_MATRIX		0x1d00
#define RADEON_PP_MISC			0x1c14
#define RADEON_PP_ROT_MATRIX_0		0x1d58
#define RADEON_PP_TXFILTER_0		0x1c54
#define RADEON_PP_TXOFFSET_0		0x1c5c
#define RADEON_PP_TXFILTER_1		0x1c6c
#define RADEON_PP_TXFILTER_2		0x1c84

/* 2D destination cache flush/status (R300 variant). */
#define R300_RB2D_DSTCACHE_CTLSTAT	0x342c /* use R300_DSTCACHE_CTLSTAT */
#define R300_DSTCACHE_CTLSTAT		0x1714
#	define R300_RB2D_DC_FLUSH		(3 << 0)
#	define R300_RB2D_DC_FREE		(3 << 2)
#	define R300_RB2D_DC_FLUSH_ALL		0xf
#	define R300_RB2D_DC_BUSY		(1 << 31)
/* 3D render backend control. */
#define RADEON_RB3D_CNTL		0x1c3c
#	define RADEON_ALPHA_BLEND_ENABLE	(1 << 0)
#	define RADEON_PLANE_MASK_ENABLE		(1 << 1)
#	define RADEON_DITHER_ENABLE		(1 << 2)
#	define RADEON_ROUND_ENABLE		(1 << 3)
#	define RADEON_SCALE_DITHER_ENABLE	(1 << 4)
#	define RADEON_DITHER_INIT		(1 << 5)
#	define RADEON_ROP_ENABLE		(1 << 6)
#	define RADEON_STENCIL_ENABLE		(1 << 7)
#	define RADEON_Z_ENABLE			(1 << 8)
#	define RADEON_ZBLOCK16			(1 << 15)
#define RADEON_RB3D_DEPTHOFFSET		0x1c24
#define RADEON_RB3D_DEPTHCLEARVALUE	0x3230
#define RADEON_RB3D_DEPTHPITCH		0x1c28
#define RADEON_RB3D_PLANEMASK		0x1d84
#define RADEON_RB3D_STENCILREFMASK	0x1d7c
/* Z-cache flush/status. */
#define RADEON_RB3D_ZCACHE_MODE		0x3250
#define RADEON_RB3D_ZCACHE_CTLSTAT	0x3254
#	define RADEON_RB3D_ZC_FLUSH		(1 << 0)
#	define RADEON_RB3D_ZC_FREE		(1 << 2)
#	define RADEON_RB3D_ZC_FLUSH_ALL		0x5
#	define RADEON_RB3D_ZC_BUSY		(1 << 31)
#define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
#	define R300_ZC_FLUSH                    (1 << 0)
#	define R300_ZC_FREE                     (1 << 1)
#	define R300_ZC_BUSY                     (1 << 31)
/* Destination (color) cache flush/status. */
#define RADEON_RB3D_DSTCACHE_CTLSTAT	0x325c
#	define RADEON_RB3D_DC_FLUSH		(3 << 0)
#	define RADEON_RB3D_DC_FREE		(3 << 2)
#	define RADEON_RB3D_DC_FLUSH_ALL		0xf
#	define RADEON_RB3D_DC_BUSY		(1 << 31)
#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
#	define R300_RB3D_DC_FLUSH               (2 << 0)
#	define R300_RB3D_DC_FREE                (2 << 2)
#	define R300_RB3D_DC_FINISH              (1 << 4)
/* Z/stencil test control (also defined with DEPTH_FORMAT bits later in
 * this header — identical value, legal C redefinition). */
#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
#	define RADEON_Z_TEST_MASK		(7 << 4)
#	define RADEON_Z_TEST_ALWAYS		(7 << 4)
#	define RADEON_Z_HIERARCHY_ENABLE	(1 << 8)
#	define RADEON_STENCIL_TEST_ALWAYS	(7 << 12)
#	define RADEON_STENCIL_S_FAIL_REPLACE	(2 << 16)
#	define RADEON_STENCIL_ZPASS_REPLACE	(2 << 20)
#	define RADEON_STENCIL_ZFAIL_REPLACE	(2 << 24)
#	define RADEON_Z_COMPRESSION_ENABLE	(1 << 28)
#	define RADEON_FORCE_Z_DIRTY		(1 << 29)
#	define RADEON_Z_WRITE_ENABLE		(1 << 30)
#	define RADEON_Z_DECOMPRESSION_ENABLE	(1 << 31)
/* Per-block soft reset (CP, host interface, setup engine, ...). */
#define RADEON_RBBM_SOFT_RESET		0x00f0
#	define RADEON_SOFT_RESET_CP		(1 <<  0)
#	define RADEON_SOFT_RESET_HI		(1 <<  1)
#	define RADEON_SOFT_RESET_SE		(1 <<  2)
#	define RADEON_SOFT_RESET_RE		(1 <<  3)
#	define RADEON_SOFT_RESET_PP		(1 <<  4)
#	define RADEON_SOFT_RESET_E2		(1 <<  5)
#	define RADEON_SOFT_RESET_RB		(1 <<  6)
#	define RADEON_SOFT_RESET_HDP		(1 <<  7)
/*
 *   6:0  Available slots in the FIFO
 *   8    Host Interface active
 *   9    CP request active
 *   10   FIFO request active
 *   11   Host Interface retry active
 *   12   CP retry active
 *   13   FIFO retry active
 *   14   FIFO pipeline busy
 *   15   Event engine busy
 *   16   CP command stream busy
 *   17   2D engine busy
 *   18   2D portion of render backend busy
 *   20   3D setup engine busy
 *   26   GA engine busy
 *   27   CBA 2D engine busy
 *   31   2D engine busy or 3D engine busy or FIFO not empty or CP busy or
 *           command stream queue not empty or Ring Buffer not empty
 */
#define RADEON_RBBM_STATUS		0x0e40
/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register.  */
/* #define RADEON_RBBM_STATUS		0x1740 */
/* bits 6:0 are dword slots available in the cmd fifo */
#	define RADEON_RBBM_FIFOCNT_MASK		0x007f
#	define RADEON_HIRQ_ON_RBB	(1 <<  8)
#	define RADEON_CPRQ_ON_RBB	(1 <<  9)
#	define RADEON_CFRQ_ON_RBB	(1 << 10)
#	define RADEON_HIRQ_IN_RTBUF	(1 << 11)
#	define RADEON_CPRQ_IN_RTBUF	(1 << 12)
#	define RADEON_CFRQ_IN_RTBUF	(1 << 13)
#	define RADEON_PIPE_BUSY		(1 << 14)
#	define RADEON_ENG_EV_BUSY	(1 << 15)
#	define RADEON_CP_CMDSTRM_BUSY	(1 << 16)
#	define RADEON_E2_BUSY		(1 << 17)
#	define RADEON_RB2D_BUSY		(1 << 18)
#	define RADEON_RB3D_BUSY		(1 << 19) /* not used on r300 */
#	define RADEON_VAP_BUSY		(1 << 20)
#	define RADEON_RE_BUSY		(1 << 21) /* not used on r300 */
#	define RADEON_TAM_BUSY		(1 << 22) /* not used on r300 */
#	define RADEON_TDM_BUSY		(1 << 23) /* not used on r300 */
#	define RADEON_PB_BUSY		(1 << 24) /* not used on r300 */
#	define RADEON_TIM_BUSY		(1 << 25) /* not used on r300 */
#	define RADEON_GA_BUSY		(1 << 26)
#	define RADEON_CBA2D_BUSY	(1 << 27)
#	define RADEON_RBBM_ACTIVE	(1 << 31)
/* Raster engine registers. */
#define RADEON_RE_LINE_PATTERN		0x1cd0
#define RADEON_RE_MISC			0x26c4
#define RADEON_RE_TOP_LEFT		0x26c0
#define RADEON_RE_WIDTH_HEIGHT		0x1c44
#define RADEON_RE_STIPPLE_ADDR		0x1cc8
#define RADEON_RE_STIPPLE_DATA		0x1ccc
929
/* Scissor rectangles (top-left / bottom-right per scissor). */
#define RADEON_SCISSOR_TL_0		0x1cd8
#define RADEON_SCISSOR_BR_0		0x1cdc
#define RADEON_SCISSOR_TL_1		0x1ce0
#define RADEON_SCISSOR_BR_1		0x1ce4
#define RADEON_SCISSOR_TL_2		0x1ce8
#define RADEON_SCISSOR_BR_2		0x1cec
/* Setup engine (SE) state. */
#define RADEON_SE_COORD_FMT		0x1c50
#define RADEON_SE_CNTL			0x1c4c
#	define RADEON_FFACE_CULL_CW		(0 << 0)
#	define RADEON_BFACE_SOLID		(3 << 1)
#	define RADEON_FFACE_SOLID		(3 << 3)
#	define RADEON_FLAT_SHADE_VTX_LAST	(3 << 6)
#	define RADEON_DIFFUSE_SHADE_FLAT	(1 << 8)
#	define RADEON_DIFFUSE_SHADE_GOURAUD	(2 << 8)
#	define RADEON_ALPHA_SHADE_FLAT		(1 << 10)
#	define RADEON_ALPHA_SHADE_GOURAUD	(2 << 10)
#	define RADEON_SPECULAR_SHADE_FLAT	(1 << 12)
#	define RADEON_SPECULAR_SHADE_GOURAUD	(2 << 12)
#	define RADEON_FOG_SHADE_FLAT		(1 << 14)
#	define RADEON_FOG_SHADE_GOURAUD		(2 << 14)
#	define RADEON_VPORT_XY_XFORM_ENABLE	(1 << 24)
#	define RADEON_VPORT_Z_XFORM_ENABLE	(1 << 25)
#	define RADEON_VTX_PIX_CENTER_OGL	(1 << 27)
#	define RADEON_ROUND_MODE_TRUNC		(0 << 28)
#	define RADEON_ROUND_PREC_8TH_PIX	(1 << 30)
#define RADEON_SE_CNTL_STATUS		0x2140
#define RADEON_SE_LINE_WIDTH		0x1db8
#define RADEON_SE_VPORT_XSCALE		0x1d98
#define RADEON_SE_ZBIAS_FACTOR		0x1db0
#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
#define RADEON_SE_TCL_OUTPUT_VTX_FMT         0x2254
/* TCL vector/scalar state is loaded through index/data register pairs. */
#define RADEON_SE_TCL_VECTOR_INDX_REG        0x2200
#       define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT  16
#       define RADEON_VEC_INDX_DWORD_COUNT_SHIFT     28
#define RADEON_SE_TCL_VECTOR_DATA_REG        0x2204
#define RADEON_SE_TCL_SCALAR_INDX_REG        0x2208
#       define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT  16
#define RADEON_SE_TCL_SCALAR_DATA_REG        0x220C
/* Surface registers — tiling/swap configuration for up to 8 address
 * ranges (surfaces 0-7). */
#define RADEON_SURFACE_ACCESS_FLAGS	0x0bf8
#define RADEON_SURFACE_ACCESS_CLR	0x0bfc
#define RADEON_SURFACE_CNTL		0x0b00
#	define RADEON_SURF_TRANSLATION_DIS	(1 << 8)
#	define RADEON_NONSURF_AP0_SWP_MASK	(3 << 20)
#	define RADEON_NONSURF_AP0_SWP_LITTLE	(0 << 20)
#	define RADEON_NONSURF_AP0_SWP_BIG16	(1 << 20)
#	define RADEON_NONSURF_AP0_SWP_BIG32	(2 << 20)
#	define RADEON_NONSURF_AP1_SWP_MASK	(3 << 22)
#	define RADEON_NONSURF_AP1_SWP_LITTLE	(0 << 22)
#	define RADEON_NONSURF_AP1_SWP_BIG16	(1 << 22)
#	define RADEON_NONSURF_AP1_SWP_BIG32	(2 << 22)
#define RADEON_SURFACE0_INFO		0x0b0c
#	define RADEON_SURF_PITCHSEL_MASK	(0x1ff << 0)
#	define RADEON_SURF_TILE_MODE_MASK	(3 << 16)
#	define RADEON_SURF_TILE_MODE_MACRO	(0 << 16)
#	define RADEON_SURF_TILE_MODE_MICRO	(1 << 16)
#	define RADEON_SURF_TILE_MODE_32BIT_Z	(2 << 16)
#	define RADEON_SURF_TILE_MODE_16BIT_Z	(3 << 16)
#define RADEON_SURFACE0_LOWER_BOUND	0x0b04
#define RADEON_SURFACE0_UPPER_BOUND	0x0b08
#	define RADEON_SURF_ADDRESS_FIXED_MASK	(0x3ff << 0)
#define RADEON_SURFACE1_INFO		0x0b1c
#define RADEON_SURFACE1_LOWER_BOUND	0x0b14
#define RADEON_SURFACE1_UPPER_BOUND	0x0b18
#define RADEON_SURFACE2_INFO		0x0b2c
#define RADEON_SURFACE2_LOWER_BOUND	0x0b24
#define RADEON_SURFACE2_UPPER_BOUND	0x0b28
#define RADEON_SURFACE3_INFO		0x0b3c
#define RADEON_SURFACE3_LOWER_BOUND	0x0b34
#define RADEON_SURFACE3_UPPER_BOUND	0x0b38
#define RADEON_SURFACE4_INFO		0x0b4c
#define RADEON_SURFACE4_LOWER_BOUND	0x0b44
#define RADEON_SURFACE4_UPPER_BOUND	0x0b48
#define RADEON_SURFACE5_INFO		0x0b5c
#define RADEON_SURFACE5_LOWER_BOUND	0x0b54
#define RADEON_SURFACE5_UPPER_BOUND	0x0b58
#define RADEON_SURFACE6_INFO		0x0b6c
#define RADEON_SURFACE6_LOWER_BOUND	0x0b64
#define RADEON_SURFACE6_UPPER_BOUND	0x0b68
#define RADEON_SURFACE7_INFO		0x0b7c
#define RADEON_SURFACE7_LOWER_BOUND	0x0b74
#define RADEON_SURFACE7_UPPER_BOUND	0x0b78
#define RADEON_SW_SEMAPHORE		0x013c

/* WAIT_UNTIL — stall the CP until the selected engines are idle. */
#define RADEON_WAIT_UNTIL		0x1720
#	define RADEON_WAIT_CRTC_PFLIP		(1 << 0)
#	define RADEON_WAIT_2D_IDLE		(1 << 14)
#	define RADEON_WAIT_3D_IDLE		(1 << 15)
#	define RADEON_WAIT_2D_IDLECLEAN		(1 << 16)
#	define RADEON_WAIT_3D_IDLECLEAN		(1 << 17)
#	define RADEON_WAIT_HOST_IDLECLEAN	(1 << 18)

#define RADEON_RB3D_ZMASKOFFSET		0x3234
/* Duplicate of the earlier RADEON_RB3D_ZSTENCILCNTL definition (same
 * value — legal identical redefinition); adds the DEPTH_FORMAT bits. */
#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
#	define RADEON_DEPTH_FORMAT_16BIT_INT_Z	(0 << 0)
#	define RADEON_DEPTH_FORMAT_24BIT_INT_Z	(2 << 0)
1025
/* CP registers */
/* Microengine RAM load interface (firmware upload). */
#define RADEON_CP_ME_RAM_ADDR		0x07d4
#define RADEON_CP_ME_RAM_RADDR		0x07d8
#define RADEON_CP_ME_RAM_DATAH		0x07dc
#define RADEON_CP_ME_RAM_DATAL		0x07e0

/* Ring buffer base/control and read/write pointers. */
#define RADEON_CP_RB_BASE		0x0700
#define RADEON_CP_RB_CNTL		0x0704
#	define RADEON_BUF_SWAP_32BIT		(2 << 16)
#	define RADEON_RB_NO_UPDATE		(1 << 27)
#	define RADEON_RB_RPTR_WR_ENA		(1 << 31)
#define RADEON_CP_RB_RPTR_ADDR		0x070c
#define RADEON_CP_RB_RPTR		0x0710
#define RADEON_CP_RB_WPTR		0x0714

#define RADEON_CP_RB_WPTR_DELAY		0x0718
#	define RADEON_PRE_WRITE_TIMER_SHIFT	0
#	define RADEON_PRE_WRITE_LIMIT_SHIFT	23

#define RADEON_CP_IB_BASE		0x0738

/* Command stream queue mode: PIO vs. bus-master for the primary and
 * indirect streams. */
#define RADEON_CP_CSQ_CNTL		0x0740
#	define RADEON_CSQ_CNT_PRIMARY_MASK	(0xff << 0)
#	define RADEON_CSQ_PRIDIS_INDDIS		(0 << 28)
#	define RADEON_CSQ_PRIPIO_INDDIS		(1 << 28)
#	define RADEON_CSQ_PRIBM_INDDIS		(2 << 28)
#	define RADEON_CSQ_PRIPIO_INDBM		(3 << 28)
#	define RADEON_CSQ_PRIBM_INDBM		(4 << 28)
#	define RADEON_CSQ_PRIPIO_INDPIO		(15 << 28)

#define R300_CP_RESYNC_ADDR		0x0778
#define R300_CP_RESYNC_DATA		0x077c

/* AIC — on-chip PCI GART address translation. */
#define RADEON_AIC_CNTL			0x01d0
#	define RADEON_PCIGART_TRANSLATE_EN	(1 << 0)
#	define RS400_MSI_REARM	                (1 << 3)
#define RADEON_AIC_STAT			0x01d4
#define RADEON_AIC_PT_BASE		0x01d8
#define RADEON_AIC_LO_ADDR		0x01dc
#define RADEON_AIC_HI_ADDR		0x01e0
#define RADEON_AIC_TLB_ADDR		0x01e4
#define RADEON_AIC_TLB_DATA		0x01e8
1068
/* CP command packets */
/* Packet type is in bits 31:30; the sub-opcode ("IT opcode") of a
 * type-3 packet sits in bits 15:8, which is why the values below look
 * like opcode << 8. */
#define RADEON_CP_PACKET0		0x00000000
#	define RADEON_ONE_REG_WR		(1 << 15)
#define RADEON_CP_PACKET1		0x40000000
#define RADEON_CP_PACKET2		0x80000000
#define RADEON_CP_PACKET3		0xC0000000
#	define RADEON_CP_NOP			0x00001000
#	define RADEON_CP_NEXT_CHAR		0x00001900
#	define RADEON_CP_PLY_NEXTSCAN		0x00001D00
#	define RADEON_CP_SET_SCISSORS		0x00001E00
		/* GEN_INDX_PRIM is unsupported starting with R300 */
#	define RADEON_3D_RNDR_GEN_INDX_PRIM	0x00002300
#	define RADEON_WAIT_FOR_IDLE		0x00002600
#	define RADEON_3D_DRAW_VBUF		0x00002800
#	define RADEON_3D_DRAW_IMMD		0x00002900
#	define RADEON_3D_DRAW_INDX		0x00002A00
#	define RADEON_CP_LOAD_PALETTE		0x00002C00
#	define RADEON_3D_LOAD_VBPNTR		0x00002F00
#	define RADEON_MPEG_IDCT_MACROBLOCK	0x00003000
#	define RADEON_MPEG_IDCT_MACROBLOCK_REV	0x00003100
#	define RADEON_3D_CLEAR_ZMASK		0x00003200
#	define RADEON_CP_INDX_BUFFER		0x00003300
#	define RADEON_CP_3D_DRAW_VBUF_2		0x00003400
#	define RADEON_CP_3D_DRAW_IMMD_2		0x00003500
#	define RADEON_CP_3D_DRAW_INDX_2		0x00003600
#	define RADEON_3D_CLEAR_HIZ		0x00003700
#	define RADEON_CP_3D_CLEAR_CMASK		0x00003802
#	define RADEON_CNTL_HOSTDATA_BLT		0x00009400
#	define RADEON_CNTL_PAINT_MULTI		0x00009A00
#	define RADEON_CNTL_BITBLT_MULTI		0x00009B00
#	define RADEON_CNTL_SET_SCISSORS		0xC0001E00

/* R600-family type-3 IT opcodes (different opcode space from the
 * pre-R600 packets above). */
#	define R600_IT_INDIRECT_BUFFER_END	0x00001700
#	define R600_IT_SET_PREDICATION		0x00002000
#	define R600_IT_REG_RMW			0x00002100
#	define R600_IT_COND_EXEC		0x00002200
#	define R600_IT_PRED_EXEC		0x00002300
#	define R600_IT_START_3D_CMDBUF		0x00002400
#	define R600_IT_DRAW_INDEX_2		0x00002700
#	define R600_IT_CONTEXT_CONTROL		0x00002800
#	define R600_IT_DRAW_INDEX_IMMD_BE	0x00002900
#	define R600_IT_INDEX_TYPE		0x00002A00
#	define R600_IT_DRAW_INDEX		0x00002B00
#	define R600_IT_DRAW_INDEX_AUTO		0x00002D00
#	define R600_IT_DRAW_INDEX_IMMD		0x00002E00
#	define R600_IT_NUM_INSTANCES		0x00002F00
#	define R600_IT_STRMOUT_BUFFER_UPDATE	0x00003400
#	define R600_IT_INDIRECT_BUFFER_MP	0x00003800
#	define R600_IT_MEM_SEMAPHORE		0x00003900
#	define R600_IT_MPEG_INDEX		0x00003A00
#	define R600_IT_WAIT_REG_MEM		0x00003C00
#	define R600_IT_MEM_WRITE		0x00003D00
#	define R600_IT_INDIRECT_BUFFER		0x00003200
#	define R600_IT_SURFACE_SYNC		0x00004300
#	define R600_CB0_DEST_BASE_ENA	(1 << 6)
#	define R600_TC_ACTION_ENA	(1 << 23)
#	define R600_VC_ACTION_ENA	(1 << 24)
#	define R600_CB_ACTION_ENA	(1 << 25)
#	define R600_DB_ACTION_ENA	(1 << 26)
#	define R600_SH_ACTION_ENA	(1 << 27)
#	define R600_SMX_ACTION_ENA	(1 << 28)
#	define R600_IT_ME_INITIALIZE		0x00004400
#	define R600_ME_INITIALIZE_DEVICE_ID(x)	((x) << 16)
#	define R600_IT_COND_WRITE		0x00004500
#	define R600_IT_EVENT_WRITE		0x00004600
#	define R600_IT_EVENT_WRITE_EOP		0x00004700
#	define R600_IT_ONE_REG_WRITE		0x00005700
/* SET_* packets write a run of registers; the *_OFFSET/*_END pairs give
 * the register range each packet is allowed to touch (used by the
 * command-stream checker). */
#	define R600_IT_SET_CONFIG_REG		0x00006800
#	define R600_SET_CONFIG_REG_OFFSET	0x00008000
#	define R600_SET_CONFIG_REG_END		0x0000ac00
#	define R600_IT_SET_CONTEXT_REG		0x00006900
#	define R600_SET_CONTEXT_REG_OFFSET	0x00028000
#	define R600_SET_CONTEXT_REG_END		0x00029000
#	define R600_IT_SET_ALU_CONST		0x00006A00
#	define R600_SET_ALU_CONST_OFFSET	0x00030000
#	define R600_SET_ALU_CONST_END		0x00032000
#	define R600_IT_SET_BOOL_CONST		0x00006B00
#	define R600_SET_BOOL_CONST_OFFSET	0x0003e380
#	define R600_SET_BOOL_CONST_END		0x00040000
#	define R600_IT_SET_LOOP_CONST		0x00006C00
#	define R600_SET_LOOP_CONST_OFFSET	0x0003e200
#	define R600_SET_LOOP_CONST_END		0x0003e380
#	define R600_IT_SET_RESOURCE		0x00006D00
#	define R600_SET_RESOURCE_OFFSET		0x00038000
#	define R600_SET_RESOURCE_END		0x0003c000
#	define R600_SQ_TEX_VTX_INVALID_TEXTURE	0x0
#	define R600_SQ_TEX_VTX_INVALID_BUFFER	0x1
#	define R600_SQ_TEX_VTX_VALID_TEXTURE	0x2
#	define R600_SQ_TEX_VTX_VALID_BUFFER	0x3
#	define R600_IT_SET_SAMPLER		0x00006E00
#	define R600_SET_SAMPLER_OFFSET		0x0003c000
#	define R600_SET_SAMPLER_END		0x0003cff0
#	define R600_IT_SET_CTL_CONST		0x00006F00
#	define R600_SET_CTL_CONST_OFFSET	0x0003cff0
#	define R600_SET_CTL_CONST_END		0x0003e200
#	define R600_IT_SURFACE_BASE_UPDATE	0x00007300
1165
1166#define RADEON_CP_PACKET_MASK 0xC0000000
1167#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
1168#define RADEON_CP_PACKET0_REG_MASK 0x000007ff
1169#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
1170#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
1171
/* Vertex format presence flags.  Bit 31 uses an unsigned literal: left-
 * shifting 1 into the sign bit of a signed int is undefined behavior in C. */
#define RADEON_VTX_Z_PRESENT			(1U << 31)
#define RADEON_VTX_PKCOLOR_PRESENT		(1 << 3)
1174
1175#define RADEON_PRIM_TYPE_NONE (0 << 0)
1176#define RADEON_PRIM_TYPE_POINT (1 << 0)
1177#define RADEON_PRIM_TYPE_LINE (2 << 0)
1178#define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0)
1179#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0)
1180#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0)
1181#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0)
1182#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0)
1183#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0)
1184#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
1185#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
1186#define RADEON_PRIM_TYPE_MASK 0xf
1187#define RADEON_PRIM_WALK_IND (1 << 4)
1188#define RADEON_PRIM_WALK_LIST (2 << 4)
1189#define RADEON_PRIM_WALK_RING (3 << 4)
1190#define RADEON_COLOR_ORDER_BGRA (0 << 6)
1191#define RADEON_COLOR_ORDER_RGBA (1 << 6)
1192#define RADEON_MAOS_ENABLE (1 << 7)
1193#define RADEON_VTX_FMT_R128_MODE (0 << 8)
1194#define RADEON_VTX_FMT_RADEON_MODE (1 << 8)
1195#define RADEON_NUM_VERTICES_SHIFT 16
1196
1197#define RADEON_COLOR_FORMAT_CI8 2
1198#define RADEON_COLOR_FORMAT_ARGB1555 3
1199#define RADEON_COLOR_FORMAT_RGB565 4
1200#define RADEON_COLOR_FORMAT_ARGB8888 6
1201#define RADEON_COLOR_FORMAT_RGB332 7
1202#define RADEON_COLOR_FORMAT_RGB8 9
1203#define RADEON_COLOR_FORMAT_ARGB4444 15
1204
1205#define RADEON_TXFORMAT_I8 0
1206#define RADEON_TXFORMAT_AI88 1
1207#define RADEON_TXFORMAT_RGB332 2
1208#define RADEON_TXFORMAT_ARGB1555 3
1209#define RADEON_TXFORMAT_RGB565 4
1210#define RADEON_TXFORMAT_ARGB4444 5
1211#define RADEON_TXFORMAT_ARGB8888 6
1212#define RADEON_TXFORMAT_RGBA8888 7
1213#define RADEON_TXFORMAT_Y8 8
1214#define RADEON_TXFORMAT_VYUY422 10
1215#define RADEON_TXFORMAT_YVYU422 11
1216#define RADEON_TXFORMAT_DXT1 12
1217#define RADEON_TXFORMAT_DXT23 14
1218#define RADEON_TXFORMAT_DXT45 15
1219
1220#define R200_PP_TXCBLEND_0 0x2f00
1221#define R200_PP_TXCBLEND_1 0x2f10
1222#define R200_PP_TXCBLEND_2 0x2f20
1223#define R200_PP_TXCBLEND_3 0x2f30
1224#define R200_PP_TXCBLEND_4 0x2f40
1225#define R200_PP_TXCBLEND_5 0x2f50
1226#define R200_PP_TXCBLEND_6 0x2f60
1227#define R200_PP_TXCBLEND_7 0x2f70
1228#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268
1229#define R200_PP_TFACTOR_0 0x2ee0
1230#define R200_SE_VTX_FMT_0 0x2088
1231#define R200_SE_VAP_CNTL 0x2080
1232#define R200_SE_TCL_MATRIX_SEL_0 0x2230
1233#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8
1234#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0
1235#define R200_PP_TXFILTER_5 0x2ca0
1236#define R200_PP_TXFILTER_4 0x2c80
1237#define R200_PP_TXFILTER_3 0x2c60
1238#define R200_PP_TXFILTER_2 0x2c40
1239#define R200_PP_TXFILTER_1 0x2c20
1240#define R200_PP_TXFILTER_0 0x2c00
1241#define R200_PP_TXOFFSET_5 0x2d78
1242#define R200_PP_TXOFFSET_4 0x2d60
1243#define R200_PP_TXOFFSET_3 0x2d48
1244#define R200_PP_TXOFFSET_2 0x2d30
1245#define R200_PP_TXOFFSET_1 0x2d18
1246#define R200_PP_TXOFFSET_0 0x2d00
1247
1248#define R200_PP_CUBIC_FACES_0 0x2c18
1249#define R200_PP_CUBIC_FACES_1 0x2c38
1250#define R200_PP_CUBIC_FACES_2 0x2c58
1251#define R200_PP_CUBIC_FACES_3 0x2c78
1252#define R200_PP_CUBIC_FACES_4 0x2c98
1253#define R200_PP_CUBIC_FACES_5 0x2cb8
1254#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04
1255#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08
1256#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c
1257#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10
1258#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14
1259#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c
1260#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20
1261#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24
1262#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28
1263#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c
1264#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34
1265#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38
1266#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c
1267#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40
1268#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44
1269#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c
1270#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50
1271#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54
1272#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58
1273#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c
1274#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64
1275#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68
1276#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c
1277#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70
1278#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74
1279#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c
1280#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80
1281#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84
1282#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88
1283#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c
1284
/* Miscellaneous R200 setup-engine / render-engine registers.
 * Note: a duplicate definition of R200_RE_AUX_SCISSOR_CNTL (same value,
 * 0x26f0) used to appear twice in this list; the redundant copy was removed. */
#define R200_RE_AUX_SCISSOR_CNTL		0x26f0
#define R200_SE_VTE_CNTL			0x20b0
#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL		0x2250
#define R200_PP_TAM_DEBUG3			0x2d9c
#define R200_PP_CNTL_X				0x2cc4
#define R200_SE_VAP_CNTL_STATUS			0x2140
#define R200_RE_SCISSOR_TL_0			0x1cd8
#define R200_RE_SCISSOR_TL_1			0x1ce0
#define R200_RE_SCISSOR_TL_2			0x1ce8
#define R200_RB3D_DEPTHXY_OFFSET		0x1d60
#define R200_SE_VTX_STATE_CNTL			0x2180
#define R200_RE_POINTSIZE			0x2648
#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0	0x2254
1299
1300#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
1301#define RADEON_PP_TEX_SIZE_1 0x1d0c
1302#define RADEON_PP_TEX_SIZE_2 0x1d14
1303
1304#define RADEON_PP_CUBIC_FACES_0 0x1d24
1305#define RADEON_PP_CUBIC_FACES_1 0x1d28
1306#define RADEON_PP_CUBIC_FACES_2 0x1d2c
1307#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
1308#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
1309#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
1310
1311#define RADEON_SE_TCL_STATE_FLUSH 0x2284
1312
1313#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001
1314#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000
1315#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012
1316#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100
1317#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200
1318#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001
1319#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002
1320#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b
1321#define R200_3D_DRAW_IMMD_2 0xC0003500
1322#define R200_SE_VTX_FMT_1 0x208c
1323#define R200_RE_CNTL 0x1c50
1324
1325#define R200_RB3D_BLENDCOLOR 0x3218
1326
1327#define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4
1328
1329#define R200_PP_TRI_PERF 0x2cf8
1330
1331#define R200_PP_AFS_0 0x2f80
1332#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */
1333
1334#define R200_VAP_PVS_CNTL_1 0x22D0
1335
1336#define RADEON_CRTC_CRNT_FRAME 0x0214
1337#define RADEON_CRTC2_CRNT_FRAME 0x0314
1338
1339#define R500_D1CRTC_STATUS 0x609c
1340#define R500_D2CRTC_STATUS 0x689c
1341#define R500_CRTC_V_BLANK (1<<0)
1342
1343#define R500_D1CRTC_FRAME_COUNT 0x60a4
1344#define R500_D2CRTC_FRAME_COUNT 0x68a4
1345
1346#define R500_D1MODE_V_COUNTER 0x6530
1347#define R500_D2MODE_V_COUNTER 0x6d30
1348
1349#define R500_D1MODE_VBLANK_STATUS 0x6534
1350#define R500_D2MODE_VBLANK_STATUS 0x6d34
1351#define R500_VBLANK_OCCURED (1<<0)
1352#define R500_VBLANK_ACK (1<<4)
1353#define R500_VBLANK_STAT (1<<12)
1354#define R500_VBLANK_INT (1<<16)
1355
1356#define R500_DxMODE_INT_MASK 0x6540
1357#define R500_D1MODE_INT_MASK (1<<0)
1358#define R500_D2MODE_INT_MASK (1<<8)
1359
1360#define R500_DISP_INTERRUPT_STATUS 0x7edc
1361#define R500_D1_VBLANK_INTERRUPT (1 << 4)
1362#define R500_D2_VBLANK_INTERRUPT (1 << 5)
1363
1364/* R6xx/R7xx registers */
1365#define R600_MC_VM_FB_LOCATION 0x2180
1366#define R600_MC_VM_AGP_TOP 0x2184
1367#define R600_MC_VM_AGP_BOT 0x2188
1368#define R600_MC_VM_AGP_BASE 0x218c
1369#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
1370#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
1371#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
1372
1373#define R700_MC_VM_FB_LOCATION 0x2024
1374#define R700_MC_VM_AGP_TOP 0x2028
1375#define R700_MC_VM_AGP_BOT 0x202c
1376#define R700_MC_VM_AGP_BASE 0x2030
1377#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
1378#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
1379#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c
1380
1381#define R600_MCD_RD_A_CNTL 0x219c
1382#define R600_MCD_RD_B_CNTL 0x21a0
1383
1384#define R600_MCD_WR_A_CNTL 0x21a4
1385#define R600_MCD_WR_B_CNTL 0x21a8
1386
1387#define R600_MCD_RD_SYS_CNTL 0x2200
1388#define R600_MCD_WR_SYS_CNTL 0x2214
1389
1390#define R600_MCD_RD_GFX_CNTL 0x21fc
1391#define R600_MCD_RD_HDP_CNTL 0x2204
1392#define R600_MCD_RD_PDMA_CNTL 0x2208
1393#define R600_MCD_RD_SEM_CNTL 0x220c
1394#define R600_MCD_WR_GFX_CNTL 0x2210
1395#define R600_MCD_WR_HDP_CNTL 0x2218
1396#define R600_MCD_WR_PDMA_CNTL 0x221c
1397#define R600_MCD_WR_SEM_CNTL 0x2220
1398
1399# define R600_MCD_L1_TLB (1 << 0)
1400# define R600_MCD_L1_FRAG_PROC (1 << 1)
1401# define R600_MCD_L1_STRICT_ORDERING (1 << 2)
1402
1403# define R600_MCD_SYSTEM_ACCESS_MODE_MASK (3 << 6)
1404# define R600_MCD_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6)
1405# define R600_MCD_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6)
1406# define R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS (2 << 6)
1407# define R600_MCD_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6)
1408
1409# define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8)
1410# define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8)
1411
1412# define R600_MCD_SEMAPHORE_MODE (1 << 10)
1413# define R600_MCD_WAIT_L2_QUERY (1 << 11)
1414# define R600_MCD_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 12)
1415# define R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
1416
1417#define R700_MC_VM_MD_L1_TLB0_CNTL 0x2654
1418#define R700_MC_VM_MD_L1_TLB1_CNTL 0x2658
1419#define R700_MC_VM_MD_L1_TLB2_CNTL 0x265c
1420
1421#define R700_MC_VM_MB_L1_TLB0_CNTL 0x2234
1422#define R700_MC_VM_MB_L1_TLB1_CNTL 0x2238
1423#define R700_MC_VM_MB_L1_TLB2_CNTL 0x223c
1424#define R700_MC_VM_MB_L1_TLB3_CNTL 0x2240
1425
1426# define R700_ENABLE_L1_TLB (1 << 0)
1427# define R700_ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
1428# define R700_SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
1429# define R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
1430# define R700_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 15)
1431# define R700_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 18)
1432
1433#define R700_MC_ARB_RAMCFG 0x2760
1434# define R700_NOOFBANK_SHIFT 0
1435# define R700_NOOFBANK_MASK 0x3
1436# define R700_NOOFRANK_SHIFT 2
1437# define R700_NOOFRANK_MASK 0x1
1438# define R700_NOOFROWS_SHIFT 3
1439# define R700_NOOFROWS_MASK 0x7
1440# define R700_NOOFCOLS_SHIFT 6
1441# define R700_NOOFCOLS_MASK 0x3
1442# define R700_CHANSIZE_SHIFT 8
1443# define R700_CHANSIZE_MASK 0x1
1444# define R700_BURSTLENGTH_SHIFT 9
1445# define R700_BURSTLENGTH_MASK 0x1
1446#define R600_RAMCFG 0x2408
1447# define R600_NOOFBANK_SHIFT 0
1448# define R600_NOOFBANK_MASK 0x1
1449# define R600_NOOFRANK_SHIFT 1
1450# define R600_NOOFRANK_MASK 0x1
1451# define R600_NOOFROWS_SHIFT 2
1452# define R600_NOOFROWS_MASK 0x7
1453# define R600_NOOFCOLS_SHIFT 5
1454# define R600_NOOFCOLS_MASK 0x3
1455# define R600_CHANSIZE_SHIFT 7
1456# define R600_CHANSIZE_MASK 0x1
1457# define R600_BURSTLENGTH_SHIFT 8
1458# define R600_BURSTLENGTH_MASK 0x1
1459
1460#define R600_VM_L2_CNTL 0x1400
1461# define R600_VM_L2_CACHE_EN (1 << 0)
1462# define R600_VM_L2_FRAG_PROC (1 << 1)
1463# define R600_VM_ENABLE_PTE_CACHE_LRU_W (1 << 9)
1464# define R600_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 13)
1465# define R700_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 14)
1466
1467#define R600_VM_L2_CNTL2 0x1404
1468# define R600_VM_L2_CNTL2_INVALIDATE_ALL_L1_TLBS (1 << 0)
1469# define R600_VM_L2_CNTL2_INVALIDATE_L2_CACHE (1 << 1)
1470#define R600_VM_L2_CNTL3 0x1408
1471# define R600_VM_L2_CNTL3_BANK_SELECT_0(x) ((x) << 0)
1472# define R600_VM_L2_CNTL3_BANK_SELECT_1(x) ((x) << 5)
1473# define R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 10)
1474# define R700_VM_L2_CNTL3_BANK_SELECT(x) ((x) << 0)
1475# define R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 6)
1476
1477#define R600_VM_L2_STATUS 0x140c
1478
1479#define R600_VM_CONTEXT0_CNTL 0x1410
1480# define R600_VM_ENABLE_CONTEXT (1 << 0)
1481# define R600_VM_PAGE_TABLE_DEPTH_FLAT (0 << 1)
1482
1483#define R600_VM_CONTEXT0_CNTL2 0x1430
1484#define R600_VM_CONTEXT0_REQUEST_RESPONSE 0x1470
1485#define R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490
1486#define R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14b0
1487#define R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574
1488#define R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594
1489#define R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x15b4
1490
1491#define R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c
1492#define R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c
1493#define R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157c
1494
1495#define R600_HDP_HOST_PATH_CNTL 0x2c00
1496
1497#define R600_GRBM_CNTL 0x8000
1498# define R600_GRBM_READ_TIMEOUT(x) ((x) << 0)
1499
/* Graphics register bus manager status / soft reset.
 * R600_GUI_ACTIVE is bit 31; use an unsigned literal — (1 << 31) on a
 * signed int is undefined behavior in C. */
#define R600_GRBM_STATUS				0x8010
#	define R600_CMDFIFO_AVAIL_MASK			0x1f
#	define R700_CMDFIFO_AVAIL_MASK			0xf
#	define R600_GUI_ACTIVE				(1U << 31)
#define R600_GRBM_STATUS2				0x8014
#define R600_GRBM_SOFT_RESET				0x8020
#	define R600_SOFT_RESET_CP			(1 << 0)
#define R600_WAIT_UNTIL					0x8040
1508
1509#define R600_CP_SEM_WAIT_TIMER 0x85bc
1510#define R600_CP_ME_CNTL 0x86d8
1511# define R600_CP_ME_HALT (1 << 28)
1512#define R600_CP_QUEUE_THRESHOLDS 0x8760
1513# define R600_ROQ_IB1_START(x) ((x) << 0)
1514# define R600_ROQ_IB2_START(x) ((x) << 8)
1515#define R600_CP_MEQ_THRESHOLDS 0x8764
1516# define R700_STQ_SPLIT(x) ((x) << 0)
1517# define R600_MEQ_END(x) ((x) << 16)
1518# define R600_ROQ_END(x) ((x) << 24)
1519#define R600_CP_PERFMON_CNTL 0x87fc
1520#define R600_CP_RB_BASE 0xc100
/* CP ring buffer control.  RPTR_WR_ENA is bit 31; unsigned literal avoids
 * the undefined behavior of left-shifting 1 into the sign bit. */
#define R600_CP_RB_CNTL					0xc104
#	define R600_RB_BUFSZ(x)				((x) << 0)
#	define R600_RB_BLKSZ(x)				((x) << 8)
#	define R600_BUF_SWAP_32BIT			(2 << 16)
#	define R600_RB_NO_UPDATE			(1 << 27)
#	define R600_RB_RPTR_WR_ENA			(1U << 31)
1527#define R600_CP_RB_RPTR_WR 0xc108
1528#define R600_CP_RB_RPTR_ADDR 0xc10c
1529#define R600_CP_RB_RPTR_ADDR_HI 0xc110
1530#define R600_CP_RB_WPTR 0xc114
1531#define R600_CP_RB_WPTR_ADDR 0xc118
1532#define R600_CP_RB_WPTR_ADDR_HI 0xc11c
1533#define R600_CP_RB_RPTR 0x8700
1534#define R600_CP_RB_WPTR_DELAY 0x8704
1535#define R600_CP_PFP_UCODE_ADDR 0xc150
1536#define R600_CP_PFP_UCODE_DATA 0xc154
1537#define R600_CP_ME_RAM_RADDR 0xc158
1538#define R600_CP_ME_RAM_WADDR 0xc15c
1539#define R600_CP_ME_RAM_DATA 0xc160
1540#define R600_CP_DEBUG 0xc1fc
1541
1542#define R600_PA_CL_ENHANCE 0x8a14
1543# define R600_CLIP_VTX_REORDER_ENA (1 << 0)
1544# define R600_NUM_CLIP_SEQ(x) ((x) << 1)
1545#define R600_PA_SC_LINE_STIPPLE_STATE 0x8b10
1546#define R600_PA_SC_MULTI_CHIP_CNTL 0x8b20
1547#define R700_PA_SC_FORCE_EOV_MAX_CNTS 0x8b24
1548# define R700_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
1549# define R700_FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
1550#define R600_PA_SC_AA_SAMPLE_LOCS_2S 0x8b40
1551#define R600_PA_SC_AA_SAMPLE_LOCS_4S 0x8b44
1552#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8b48
1553#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8b4c
1554# define R600_S0_X(x) ((x) << 0)
1555# define R600_S0_Y(x) ((x) << 4)
1556# define R600_S1_X(x) ((x) << 8)
1557# define R600_S1_Y(x) ((x) << 12)
1558# define R600_S2_X(x) ((x) << 16)
1559# define R600_S2_Y(x) ((x) << 20)
1560# define R600_S3_X(x) ((x) << 24)
1561# define R600_S3_Y(x) ((x) << 28)
1562# define R600_S4_X(x) ((x) << 0)
1563# define R600_S4_Y(x) ((x) << 4)
1564# define R600_S5_X(x) ((x) << 8)
1565# define R600_S5_Y(x) ((x) << 12)
1566# define R600_S6_X(x) ((x) << 16)
1567# define R600_S6_Y(x) ((x) << 20)
1568# define R600_S7_X(x) ((x) << 24)
1569# define R600_S7_Y(x) ((x) << 28)
1570#define R600_PA_SC_FIFO_SIZE 0x8bd0
1571# define R600_SC_PRIM_FIFO_SIZE(x) ((x) << 0)
1572# define R600_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 8)
1573# define R600_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 16)
1574#define R700_PA_SC_FIFO_SIZE_R7XX 0x8bcc
1575# define R700_SC_PRIM_FIFO_SIZE(x) ((x) << 0)
1576# define R700_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
1577# define R700_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
1578#define R600_PA_SC_ENHANCE 0x8bf0
1579# define R600_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
1580# define R600_FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12)
1581#define R600_PA_SC_CLIPRECT_RULE 0x2820c
1582#define R700_PA_SC_EDGERULE 0x28230
1583#define R600_PA_SC_LINE_STIPPLE 0x28a0c
1584#define R600_PA_SC_MODE_CNTL 0x28a4c
1585#define R600_PA_SC_AA_CONFIG 0x28c04
1586
1587#define R600_SX_EXPORT_BUFFER_SIZES 0x900c
1588# define R600_COLOR_BUFFER_SIZE(x) ((x) << 0)
1589# define R600_POSITION_BUFFER_SIZE(x) ((x) << 8)
1590# define R600_SMX_BUFFER_SIZE(x) ((x) << 16)
1591#define R600_SX_DEBUG_1 0x9054
1592# define R600_SMX_EVENT_RELEASE (1 << 0)
1593# define R600_ENABLE_NEW_SMX_ADDRESS (1 << 16)
1594#define R700_SX_DEBUG_1 0x9058
1595# define R700_ENABLE_NEW_SMX_ADDRESS (1 << 16)
1596#define R600_SX_MISC 0x28350
1597
/* Depth block debug register; bit 31 flag uses an unsigned literal to
 * avoid signed-shift undefined behavior. */
#define R600_DB_DEBUG					0x9830
#	define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE	(1U << 31)
1600#define R600_DB_WATERMARKS 0x9838
1601# define R600_DEPTH_FREE(x) ((x) << 0)
1602# define R600_DEPTH_FLUSH(x) ((x) << 5)
1603# define R600_DEPTH_PENDING_FREE(x) ((x) << 15)
1604# define R600_DEPTH_CACHELINE_FREE(x) ((x) << 20)
1605#define R700_DB_DEBUG3 0x98b0
1606# define R700_DB_CLK_OFF_DELAY(x) ((x) << 11)
1607#define RV700_DB_DEBUG4 0x9b8c
1608# define RV700_DISABLE_TILE_COVERED_FOR_PS_ITER (1 << 6)
1609
1610#define R600_VGT_CACHE_INVALIDATION 0x88c4
1611# define R600_CACHE_INVALIDATION(x) ((x) << 0)
1612# define R600_VC_ONLY 0
1613# define R600_TC_ONLY 1
1614# define R600_VC_AND_TC 2
1615# define R700_AUTO_INVLD_EN(x) ((x) << 6)
1616# define R700_NO_AUTO 0
1617# define R700_ES_AUTO 1
1618# define R700_GS_AUTO 2
1619# define R700_ES_AND_GS_AUTO 3
1620#define R600_VGT_GS_PER_ES 0x88c8
1621#define R600_VGT_ES_PER_GS 0x88cc
1622#define R600_VGT_GS_PER_VS 0x88e8
1623#define R600_VGT_GS_VERTEX_REUSE 0x88d4
1624#define R600_VGT_NUM_INSTANCES 0x8974
1625#define R600_VGT_STRMOUT_EN 0x28ab0
1626#define R600_VGT_EVENT_INITIATOR 0x28a90
1627# define R600_CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
1628#define R600_VGT_VERTEX_REUSE_BLOCK_CNTL 0x28c58
1629# define R600_VTX_REUSE_DEPTH_MASK 0xff
1630#define R600_VGT_OUT_DEALLOC_CNTL 0x28c5c
1631# define R600_DEALLOC_DIST_MASK 0x7f
1632
1633#define R600_CB_COLOR0_BASE 0x28040
1634#define R600_CB_COLOR1_BASE 0x28044
1635#define R600_CB_COLOR2_BASE 0x28048
1636#define R600_CB_COLOR3_BASE 0x2804c
1637#define R600_CB_COLOR4_BASE 0x28050
1638#define R600_CB_COLOR5_BASE 0x28054
1639#define R600_CB_COLOR6_BASE 0x28058
1640#define R600_CB_COLOR7_BASE 0x2805c
1641#define R600_CB_COLOR7_FRAG 0x280fc
1642
1643#define R600_CB_COLOR0_SIZE 0x28060
1644#define R600_CB_COLOR0_VIEW 0x28080
1645#define R600_CB_COLOR0_INFO 0x280a0
1646#define R600_CB_COLOR0_TILE 0x280c0
1647#define R600_CB_COLOR0_FRAG 0x280e0
1648#define R600_CB_COLOR0_MASK 0x28100
1649
1650#define AVIVO_D1MODE_VLINE_START_END 0x6538
1651#define AVIVO_D2MODE_VLINE_START_END 0x6d38
1652#define R600_CP_COHER_BASE 0x85f8
1653#define R600_DB_DEPTH_BASE 0x2800c
1654#define R600_SQ_PGM_START_FS 0x28894
1655#define R600_SQ_PGM_START_ES 0x28880
1656#define R600_SQ_PGM_START_VS 0x28858
1657#define R600_SQ_PGM_RESOURCES_VS 0x28868
1658#define R600_SQ_PGM_CF_OFFSET_VS 0x288d0
1659#define R600_SQ_PGM_START_GS 0x2886c
1660#define R600_SQ_PGM_START_PS 0x28840
1661#define R600_SQ_PGM_RESOURCES_PS 0x28850
1662#define R600_SQ_PGM_EXPORTS_PS 0x28854
1663#define R600_SQ_PGM_CF_OFFSET_PS 0x288cc
1664#define R600_VGT_DMA_BASE 0x287e8
1665#define R600_VGT_DMA_BASE_HI 0x287e4
1666#define R600_VGT_STRMOUT_BASE_OFFSET_0 0x28b10
1667#define R600_VGT_STRMOUT_BASE_OFFSET_1 0x28b14
1668#define R600_VGT_STRMOUT_BASE_OFFSET_2 0x28b18
1669#define R600_VGT_STRMOUT_BASE_OFFSET_3 0x28b1c
1670#define R600_VGT_STRMOUT_BASE_OFFSET_HI_0 0x28b44
1671#define R600_VGT_STRMOUT_BASE_OFFSET_HI_1 0x28b48
1672#define R600_VGT_STRMOUT_BASE_OFFSET_HI_2 0x28b4c
1673#define R600_VGT_STRMOUT_BASE_OFFSET_HI_3 0x28b50
1674#define R600_VGT_STRMOUT_BUFFER_BASE_0 0x28ad8
1675#define R600_VGT_STRMOUT_BUFFER_BASE_1 0x28ae8
1676#define R600_VGT_STRMOUT_BUFFER_BASE_2 0x28af8
1677#define R600_VGT_STRMOUT_BUFFER_BASE_3 0x28b08
1678#define R600_VGT_STRMOUT_BUFFER_OFFSET_0 0x28adc
1679#define R600_VGT_STRMOUT_BUFFER_OFFSET_1 0x28aec
1680#define R600_VGT_STRMOUT_BUFFER_OFFSET_2 0x28afc
1681#define R600_VGT_STRMOUT_BUFFER_OFFSET_3 0x28b0c
1682
1683#define R600_VGT_PRIMITIVE_TYPE 0x8958
1684
1685#define R600_PA_SC_SCREEN_SCISSOR_TL 0x28030
1686#define R600_PA_SC_GENERIC_SCISSOR_TL 0x28240
1687#define R600_PA_SC_WINDOW_SCISSOR_TL 0x28204
1688
1689#define R600_TC_CNTL 0x9608
1690# define R600_TC_L2_SIZE(x) ((x) << 5)
1691# define R600_L2_DISABLE_LATE_HIT (1 << 9)
1692
1693#define R600_ARB_POP 0x2418
1694# define R600_ENABLE_TC128 (1 << 30)
1695#define R600_ARB_GDEC_RD_CNTL 0x246c
1696
/* Texture addresser auxiliary control.  The bilinear-precision field is
 * bit 31; use unsigned literals so the shift is well-defined. */
#define R600_TA_CNTL_AUX				0x9508
#	define R600_DISABLE_CUBE_WRAP			(1 << 0)
#	define R600_DISABLE_CUBE_ANISO			(1 << 1)
#	define R700_GETLOD_SELECT(x)			((x) << 2)
#	define R600_SYNC_GRADIENT			(1 << 24)
#	define R600_SYNC_WALKER				(1 << 25)
#	define R600_SYNC_ALIGNER			(1 << 26)
#	define R600_BILINEAR_PRECISION_6_BIT		(0U << 31)
#	define R600_BILINEAR_PRECISION_8_BIT		(1U << 31)
1706
1707#define R700_TCP_CNTL 0x9610
1708
1709#define R600_SMX_DC_CTL0 0xa020
1710# define R700_USE_HASH_FUNCTION (1 << 0)
1711# define R700_CACHE_DEPTH(x) ((x) << 1)
1712# define R700_FLUSH_ALL_ON_EVENT (1 << 10)
1713# define R700_STALL_ON_EVENT (1 << 11)
1714#define R700_SMX_EVENT_CTL 0xa02c
1715# define R700_ES_FLUSH_CTL(x) ((x) << 0)
1716# define R700_GS_FLUSH_CTL(x) ((x) << 3)
1717# define R700_ACK_FLUSH_CTL(x) ((x) << 6)
1718# define R700_SYNC_FLUSH_CTL (1 << 8)
1719
1720#define R600_SQ_CONFIG 0x8c00
1721# define R600_VC_ENABLE (1 << 0)
1722# define R600_EXPORT_SRC_C (1 << 1)
1723# define R600_DX9_CONSTS (1 << 2)
1724# define R600_ALU_INST_PREFER_VECTOR (1 << 3)
1725# define R600_DX10_CLAMP (1 << 4)
1726# define R600_CLAUSE_SEQ_PRIO(x) ((x) << 8)
1727# define R600_PS_PRIO(x) ((x) << 24)
1728# define R600_VS_PRIO(x) ((x) << 26)
1729# define R600_GS_PRIO(x) ((x) << 28)
1730# define R600_ES_PRIO(x) ((x) << 30)
1731#define R600_SQ_GPR_RESOURCE_MGMT_1 0x8c04
1732# define R600_NUM_PS_GPRS(x) ((x) << 0)
1733# define R600_NUM_VS_GPRS(x) ((x) << 16)
1734# define R700_DYN_GPR_ENABLE (1 << 27)
1735# define R600_NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
1736#define R600_SQ_GPR_RESOURCE_MGMT_2 0x8c08
1737# define R600_NUM_GS_GPRS(x) ((x) << 0)
1738# define R600_NUM_ES_GPRS(x) ((x) << 16)
1739#define R600_SQ_THREAD_RESOURCE_MGMT 0x8c0c
1740# define R600_NUM_PS_THREADS(x) ((x) << 0)
1741# define R600_NUM_VS_THREADS(x) ((x) << 8)
1742# define R600_NUM_GS_THREADS(x) ((x) << 16)
1743# define R600_NUM_ES_THREADS(x) ((x) << 24)
1744#define R600_SQ_STACK_RESOURCE_MGMT_1 0x8c10
1745# define R600_NUM_PS_STACK_ENTRIES(x) ((x) << 0)
1746# define R600_NUM_VS_STACK_ENTRIES(x) ((x) << 16)
1747#define R600_SQ_STACK_RESOURCE_MGMT_2 0x8c14
1748# define R600_NUM_GS_STACK_ENTRIES(x) ((x) << 0)
1749# define R600_NUM_ES_STACK_ENTRIES(x) ((x) << 16)
1750#define R600_SQ_MS_FIFO_SIZES 0x8cf0
1751# define R600_CACHE_FIFO_SIZE(x) ((x) << 0)
1752# define R600_FETCH_FIFO_HIWATER(x) ((x) << 8)
1753# define R600_DONE_FIFO_HIWATER(x) ((x) << 16)
1754# define R600_ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
1755#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_0 0x8db0
1756# define R700_SIMDA_RING0(x) ((x) << 0)
1757# define R700_SIMDA_RING1(x) ((x) << 8)
1758# define R700_SIMDB_RING0(x) ((x) << 16)
1759# define R700_SIMDB_RING1(x) ((x) << 24)
1760#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_1 0x8db4
1761#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_2 0x8db8
1762#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_3 0x8dbc
1763#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_4 0x8dc0
1764#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_5 0x8dc4
1765#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_6 0x8dc8
1766#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_7 0x8dcc
1767
1768#define R600_SPI_PS_IN_CONTROL_0 0x286cc
1769# define R600_NUM_INTERP(x) ((x) << 0)
1770# define R600_POSITION_ENA (1 << 8)
1771# define R600_POSITION_CENTROID (1 << 9)
1772# define R600_POSITION_ADDR(x) ((x) << 10)
1773# define R600_PARAM_GEN(x) ((x) << 15)
1774# define R600_PARAM_GEN_ADDR(x) ((x) << 19)
1775# define R600_BARYC_SAMPLE_CNTL(x) ((x) << 26)
1776# define R600_PERSP_GRADIENT_ENA (1 << 28)
1777# define R600_LINEAR_GRADIENT_ENA (1 << 29)
1778# define R600_POSITION_SAMPLE (1 << 30)
1779# define R600_BARYC_AT_SAMPLE_ENA (1 << 31)
1780#define R600_SPI_PS_IN_CONTROL_1 0x286d0
1781# define R600_GEN_INDEX_PIX (1 << 0)
1782# define R600_GEN_INDEX_PIX_ADDR(x) ((x) << 1)
1783# define R600_FRONT_FACE_ENA (1 << 8)
1784# define R600_FRONT_FACE_CHAN(x) ((x) << 9)
1785# define R600_FRONT_FACE_ALL_BITS (1 << 11)
1786# define R600_FRONT_FACE_ADDR(x) ((x) << 12)
1787# define R600_FOG_ADDR(x) ((x) << 17)
1788# define R600_FIXED_PT_POSITION_ENA (1 << 24)
1789# define R600_FIXED_PT_POSITION_ADDR(x) ((x) << 25)
1790# define R700_POSITION_ULC (1 << 30)
1791#define R600_SPI_INPUT_Z 0x286d8
1792
1793#define R600_SPI_CONFIG_CNTL 0x9100
1794# define R600_GPR_WRITE_PRIORITY(x) ((x) << 0)
1795# define R600_DISABLE_INTERP_1 (1 << 5)
1796#define R600_SPI_CONFIG_CNTL_1 0x913c
1797# define R600_VTX_DONE_DELAY(x) ((x) << 0)
1798# define R600_INTERP_ONE_PRIM_PER_ROW (1 << 4)
1799
1800#define R600_GB_TILING_CONFIG 0x98f0
1801# define R600_PIPE_TILING(x) ((x) << 1)
1802# define R600_BANK_TILING(x) ((x) << 4)
1803# define R600_GROUP_SIZE(x) ((x) << 6)
1804# define R600_ROW_TILING(x) ((x) << 8)
1805# define R600_BANK_SWAPS(x) ((x) << 11)
1806# define R600_SAMPLE_SPLIT(x) ((x) << 14)
1807# define R600_BACKEND_MAP(x) ((x) << 16)
1808#define R600_DCP_TILING_CONFIG 0x6ca0
1809#define R600_HDP_TILING_CONFIG 0x2f3c
1810
1811#define R600_CC_RB_BACKEND_DISABLE 0x98f4
1812#define R700_CC_SYS_RB_BACKEND_DISABLE 0x3f88
1813# define R600_BACKEND_DISABLE(x) ((x) << 16)
1814
1815#define R600_CC_GC_SHADER_PIPE_CONFIG 0x8950
1816#define R600_GC_USER_SHADER_PIPE_CONFIG 0x8954
1817# define R600_INACTIVE_QD_PIPES(x) ((x) << 8)
1818# define R600_INACTIVE_QD_PIPES_MASK (0xff << 8)
1819# define R600_INACTIVE_SIMDS(x) ((x) << 16)
1820# define R600_INACTIVE_SIMDS_MASK (0xff << 16)
1821
1822#define R700_CGTS_SYS_TCC_DISABLE 0x3f90
1823#define R700_CGTS_USER_SYS_TCC_DISABLE 0x3f94
1824#define R700_CGTS_TCC_DISABLE 0x9148
1825#define R700_CGTS_USER_TCC_DISABLE 0x914c
1826
1827/* Constants */
1828#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
1829
1830#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0
1831#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1
1832#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2
1833#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3
1834#define RADEON_LAST_DISPATCH 1
1835
1836#define R600_LAST_FRAME_REG R600_SCRATCH_REG0
1837#define R600_LAST_DISPATCH_REG R600_SCRATCH_REG1
1838#define R600_LAST_CLEAR_REG R600_SCRATCH_REG2
1839#define R600_LAST_SWI_REG R600_SCRATCH_REG3
1840
1841#define RADEON_MAX_VB_AGE 0x7fffffff
1842#define RADEON_MAX_VB_VERTS (0xffff)
1843
1844#define RADEON_RING_HIGH_MARK 128
1845
1846#define RADEON_PCIGART_TABLE_SIZE (32*1024)
1847
/* MMIO register accessors.  Registers below 0x10000 sit inside the mapped
 * aperture and are written directly; higher offsets must be reached
 * indirectly through the RADEON_MM_INDEX / RADEON_MM_DATA pair.
 * All macro arguments are parenthesized ('reg' was previously used bare
 * in the comparison, a macro-hygiene hazard for compound expressions). */
#define RADEON_READ(reg)	DRM_READ32(  dev_priv->mmio, (reg) )
#define RADEON_WRITE(reg, val)						\
do {									\
	if ((reg) < 0x10000) {						\
		DRM_WRITE32(dev_priv->mmio, (reg), (val));		\
	} else {							\
		DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, (reg));	\
		DRM_WRITE32(dev_priv->mmio, RADEON_MM_DATA, (val));	\
	}								\
} while (0)
#define RADEON_READ8(reg)	DRM_READ8(  dev_priv->mmio, (reg) )
#define RADEON_WRITE8(reg, val)	DRM_WRITE8( dev_priv->mmio, (reg), (val) )
1860
/*
 * Write a PLL register indirectly: select the register through
 * CLOCK_CNTL_INDEX (address in the low 5 bits, write-enable bit set),
 * then push the 32-bit value through CLOCK_CNTL_DATA.
 * Requires dev_priv in scope at the call site (via RADEON_WRITE*).
 */
#define RADEON_WRITE_PLL(addr, val)					\
do {									\
	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX,				\
		      ((addr) & 0x1f) | RADEON_PLL_WR_EN );		\
	RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val));			\
} while (0)
1867
/*
 * Write a PCIE register indirectly: latch the 8-bit register address into
 * PCIE_INDEX, then write the 32-bit value through PCIE_DATA.
 * Requires dev_priv in scope at the call site (via RADEON_WRITE*).
 */
#define RADEON_WRITE_PCIE(addr, val)					\
do {									\
	RADEON_WRITE8(RADEON_PCIE_INDEX,				\
			((addr) & 0xff));				\
	RADEON_WRITE(RADEON_PCIE_DATA, (val));				\
} while (0)
1874
/*
 * R500 memory-controller indirect write: program MC_IND_INDEX with the
 * register address plus the 0xff0000 bits (presumably per-byte write
 * enables — confirm against R520 docs), write the data word, then clear
 * the index register to end the access.
 */
#define R500_WRITE_MCIND(addr, val)					\
do {								\
	RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));	\
	RADEON_WRITE(R520_MC_IND_DATA, (val));			\
	RADEON_WRITE(R520_MC_IND_INDEX, 0);	\
} while (0)
1881
/*
 * RS480 northbridge MC indirect write: select the register with the
 * write-enable bit set, write the data word, then restore the index
 * register to 0xff to close the write window.
 */
#define RS480_WRITE_MCIND(addr, val)				\
do {									\
	RADEON_WRITE(RS480_NB_MC_INDEX,				\
		     ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);	\
	RADEON_WRITE(RS480_NB_MC_DATA, (val));			\
	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);			\
} while (0)
1889
/*
 * RS690/RS740 MC indirect write: select the register with the write-enable
 * bit, write the data, then signal write-acknowledge through the index
 * register.  'val' is now parenthesized like 'addr' (macro hygiene).
 */
#define RS690_WRITE_MCIND(addr, val)					\
do {								\
	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));	\
	RADEON_WRITE(RS690_MC_DATA, (val));			\
	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);	\
} while (0)
1896
/*
 * RS600 MC indirect write: a single index write carries the write-enable,
 * the CITF arbiter selector, and the register address; the data write
 * completes the access.  'val' is now parenthesized (macro hygiene).
 */
#define RS600_WRITE_MCIND(addr, val)				\
do {							\
	RADEON_WRITE(RS600_MC_INDEX, RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | ((addr) & RS600_MC_ADDR_MASK));	\
	RADEON_WRITE(RS600_MC_DATA, (val));			\
} while (0)
1902
/*
 * Dispatch an MC indirect write to the correct IGP-family helper based on
 * the chip family bits in dev_priv->flags: RS690/RS740 and RS600 each use
 * their own index/data pair, everything else falls back to the RS480 path.
 * Relies on dev_priv being in scope at the call site.
 */
#define IGP_WRITE_MCIND(addr, val)				\
do {									\
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||	\
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))	\
		RS690_WRITE_MCIND(addr, val);				\
	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)	\
		RS600_WRITE_MCIND(addr, val);				\
	else								\
		RS480_WRITE_MCIND(addr, val);				\
} while (0)
1913
/*
 * CP command-packet header builders.  Register offsets are byte addresses
 * encoded as dword indices (>> 2); 'n' goes into the count field at bits
 * [29:16] (cf. RADEON_CP_PACKET_COUNT_MASK).  PACKET0 writes consecutive
 * registers, PACKET0_TABLE repeatedly writes one register (ONE_REG_WR),
 * PACKET1 writes two arbitrary registers, PACKET2 is a no-op/pad, and
 * PACKET3 carries an opcode in 'pkt'.
 */
#define CP_PACKET0( reg, n )						\
	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE( reg, n )					\
	(RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET1( reg0, reg1 )					\
	(RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
#define CP_PACKET2()							\
	(RADEON_CP_PACKET2)
#define CP_PACKET3( pkt, n )						\
	(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
1924
1925/* ================================================================
1926 * Engine control helper macros
1927 */
1928
/* Emit a WAIT_UNTIL packet stalling until the 2D engine and host are idle. */
#define RADEON_WAIT_UNTIL_2D_IDLE() do {				\
	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
		   RADEON_WAIT_HOST_IDLECLEAN) );			\
} while (0)

/* Emit a WAIT_UNTIL packet stalling until the 3D engine and host are idle. */
#define RADEON_WAIT_UNTIL_3D_IDLE() do {				\
	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
	OUT_RING( (RADEON_WAIT_3D_IDLECLEAN |				\
		   RADEON_WAIT_HOST_IDLECLEAN) );			\
} while (0)

/* Emit a WAIT_UNTIL packet stalling until 2D, 3D and host are all idle. */
#define RADEON_WAIT_UNTIL_IDLE() do {					\
	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
		   RADEON_WAIT_3D_IDLECLEAN |				\
		   RADEON_WAIT_HOST_IDLECLEAN) );			\
} while (0)

/* Emit a WAIT_UNTIL packet stalling until the pending CRTC page flip completes. */
#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {				\
	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
	OUT_RING( RADEON_WAIT_CRTC_PFLIP );				\
} while (0)
1952
/* Flush the 3D destination (color) cache.  Chips up to RV280 use the
 * legacy RB3D register; R300 and newer use the relocated R300_ register. */
#define RADEON_FLUSH_CACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
	} else {							\
		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_RB3D_DC_FLUSH);				\
	}								\
} while (0)

/* Flush and free (purge) the 3D destination cache; register choice is
 * family-dependent as in RADEON_FLUSH_CACHE. */
#define RADEON_PURGE_CACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);	\
	} else {							\
		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);	\
	}								\
} while (0)

/* Flush the depth (Z) cache; pre-R300 vs R300+ register layout differs. */
#define RADEON_FLUSH_ZCACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_ZC_FLUSH);				\
	} else {							\
		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_ZC_FLUSH);				\
	}								\
} while (0)

/* Flush and free (purge) the depth cache; family-dependent registers. */
#define RADEON_PURGE_ZCACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);	\
	} else {							\
		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);			\
	}								\
} while (0)
1992
1993/* ================================================================
1994 * Misc helper macros
1995 */
1996
1997/* Perfbox functionality only.
1998 */
/* Perfbox bookkeeping only: despite the _WITH_RETURN suffix this macro
 * never returns from the caller -- it merely sets the DMA_IDLE perf box
 * bit when the ring head has caught up with the tail (ring drained).
 */
1999#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
2000do { \
2001 if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \
2002 u32 head = GET_RING_HEAD( dev_priv ); \
2003 if (head == dev_priv->ring.tail) \
2004 dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \
2005 } \
2006} while (0)
2007
/* Reset the vertex-buffer age counter once it reaches RADEON_MAX_VB_AGE:
 * idles the CP (r600 vs. legacy path chosen by chip family), zeroes the
 * sarea dispatch age and resets the buffer freelist.
 *
 * NOTE: expects 'file_priv' and 'dev' to be in scope at the expansion
 * site, and DOES return from the ENCLOSING function if the idle call
 * fails -- hence the _WITH_RETURN name.
 */
2008#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
2009do { \
2010 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \
2011 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \
2012 if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
2013 int __ret; \
2014 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) \
2015 __ret = r600_do_cp_idle(dev_priv); \
2016 else \
2017 __ret = radeon_do_cp_idle(dev_priv); \
2018 if ( __ret ) return __ret; \
2019 sarea_priv->last_dispatch = 0; \
2020 radeon_freelist_reset( dev ); \
2021 } \
2022} while (0)
2023
/*
 * Age markers: each emits the given age value into a scratch register
 * so the CPU can later poll how far the CP has progressed. Legacy
 * parts use direct PACKET0 register writes; R600 uses SET_CONFIG_REG
 * packets with a dword offset relative to the config-reg window.
 */
2024#define RADEON_DISPATCH_AGE( age ) do { \
2025 OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \
2026 OUT_RING( age ); \
2027} while (0)
2028
2029#define RADEON_FRAME_AGE( age ) do { \
2030 OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \
2031 OUT_RING( age ); \
2032} while (0)
2033
2034#define RADEON_CLEAR_AGE( age ) do { \
2035 OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \
2036 OUT_RING( age ); \
2037} while (0)
2038
/* R600 variants: write the age via a SET_CONFIG_REG packet3. */
2039#define R600_DISPATCH_AGE(age) do { \
2040 OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \
2041 OUT_RING((R600_LAST_DISPATCH_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \
2042 OUT_RING(age); \
2043} while (0)
2044
2045#define R600_FRAME_AGE(age) do { \
2046 OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \
2047 OUT_RING((R600_LAST_FRAME_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \
2048 OUT_RING(age); \
2049} while (0)
2050
2051#define R600_CLEAR_AGE(age) do { \
2052 OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \
2053 OUT_RING((R600_LAST_CLEAR_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \
2054 OUT_RING(age); \
2055} while (0)
2056
2057/* ================================================================
2058 * Ring control
2059 */
2060
/* Set to 1 to get DRM_INFO tracing from the BEGIN/OUT/ADVANCE macros. */
2061#define RADEON_VERBOSE 0
2062
/* Locals shared by the ring macros below; must be expanded at the top
 * of any function that uses BEGIN_RING()/OUT_RING()/ADVANCE_RING().
 */
2063#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring;
2064
/* Ring writes are padded so the tail stays 16-dword aligned. */
2065#define RADEON_RING_ALIGN 16
2066
/* Reserve n dwords in the ring. Computes the alignment-padded count,
 * and if the ring lacks room, commits what is queued and blocks in
 * radeon_wait_ring() until the padded count fits. Then snapshots the
 * ring base/tail/mask into the RING_LOCALS for the OUT_RING calls.
 * NOTE(review): space is waited for using _align_nr but only n dwords
 * are subtracted from ring.space here -- presumably the alignment pad
 * is accounted for elsewhere (commit path); confirm before changing.
 */
2067#define BEGIN_RING( n ) do { \
2068 if ( RADEON_VERBOSE ) { \
2069 DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
2070 } \
2071 _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \
2072 _align_nr += n; \
2073 if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \
2074 COMMIT_RING(); \
2075 radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \
2076 } \
2077 _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
2078 ring = dev_priv->ring.start; \
2079 write = dev_priv->ring.tail; \
2080 mask = dev_priv->ring.tail_mask; \
2081} while (0)
2082
/* Publish the dwords written since BEGIN_RING by advancing ring.tail to
 * the local write pointer. Sanity-checks that exactly _nr dwords were
 * emitted; on mismatch it logs an error and leaves the tail untouched
 * rather than exposing a half-written command stream to the CP.
 */
2083#define ADVANCE_RING() do { \
2084 if ( RADEON_VERBOSE ) { \
2085 DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
2086 write, dev_priv->ring.tail ); \
2087 } \
2088 if (((dev_priv->ring.tail + _nr) & mask) != write) { \
2089 DRM_ERROR( \
2090 "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
2091 ((dev_priv->ring.tail + _nr) & mask), \
2092 write, __LINE__); \
2093 } else \
2094 dev_priv->ring.tail = write; \
2095} while (0)
2096
2097extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
2098
/* Hand the queued ring contents to the hardware (see radeon_commit_ring). */
2099#define COMMIT_RING() do { \
2100 radeon_commit_ring(dev_priv); \
2101 } while(0)
2102
/* Emit one dword at the local write pointer, wrapping via the ring
 * size mask. Only valid between BEGIN_RING() and ADVANCE_RING().
 */
2103#define OUT_RING( x ) do { \
2104 if ( RADEON_VERBOSE ) { \
2105 DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
2106 (unsigned int)(x), write ); \
2107 } \
2108 ring[write++] = (x); \
2109 write &= mask; \
2110} while (0)
2111
/* Convenience: emit a single-register PACKET0 write of val to reg. */
2112#define OUT_RING_REG( reg, val ) do { \
2113 OUT_RING( CP_PACKET0( reg, 0 ) ); \
2114 OUT_RING( val ); \
2115} while (0)
2116
/* Bulk-copy sz dwords from tab into the ring. If the copy would run
 * past the end of the ring, the head portion is copied up to the wrap
 * point first, then the remainder continues from offset 0.
 * NOTE(review): the '_tab += _i;' after the wrap loop is dead code --
 * the loop only exits when _i == 0 (and _tab already advanced inside
 * it), so the statement always adds zero.
 */
2117#define OUT_RING_TABLE( tab, sz ) do { \
2118 int _size = (sz); \
2119 int *_tab = (int *)(tab); \
2120 \
2121 if (write + _size > mask) { \
2122 int _i = (mask+1) - write; \
2123 _size -= _i; \
2124 while (_i > 0 ) { \
2125 *(int *)(ring + write) = *_tab++; \
2126 write++; \
2127 _i--; \
2128 } \
2129 write = 0; \
2130 _tab += _i; \
2131 } \
2132 while (_size > 0) { \
2133 *(ring + write) = *_tab++; \
2134 write++; \
2135 _size--; \
2136 } \
2137 write &= mask; \
2138} while (0)
2139
2140/**
2141 * Copy given number of dwords from drm buffer to the ring buffer.
2142 */
/* Each iteration copies the largest contiguous chunk allowed by both
 * the ring wrap point and the drm_buffer's current page boundary, then
 * advances both cursors; _part_size is in bytes, write in dwords.
 */
2143#define OUT_RING_DRM_BUFFER(buf, sz) do { \
2144 int _size = (sz) * 4; \
2145 struct drm_buffer *_buf = (buf); \
2146 int _part_size; \
2147 while (_size > 0) { \
2148 _part_size = _size; \
2149 \
2150 if (write + _part_size/4 > mask) \
2151 _part_size = ((mask + 1) - write)*4; \
2152 \
2153 if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
2154 _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
2155 \
2156 \
2157 \
2158 memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
2159 [drm_buffer_index(_buf)], _part_size); \
2160 \
2161 _size -= _part_size; \
2162 write = (write + _part_size/4) & mask; \
2163 drm_buffer_advance(_buf, _part_size); \
2164 } \
2165} while (0)
2166
2167
2168#endif /* CONFIG_DRM_RADEON_UMS */
2169
2170#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index adc44bbc81a9..d2e628eea53d 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -282,7 +282,7 @@ out_unref:
282 282
283 } 283 }
284 if (fb && ret) { 284 if (fb && ret) {
285 drm_gem_object_unreference(gobj); 285 drm_gem_object_unreference_unlocked(gobj);
286 drm_framebuffer_unregister_private(fb); 286 drm_framebuffer_unregister_private(fb);
287 drm_framebuffer_cleanup(fb); 287 drm_framebuffer_cleanup(fb);
288 kfree(fb); 288 kfree(fb);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index df09ca7c4889..05815c47b246 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -130,7 +130,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
130 struct radeon_fence **fence, 130 struct radeon_fence **fence,
131 int ring) 131 int ring)
132{ 132{
133 u64 seq = ++rdev->fence_drv[ring].sync_seq[ring]; 133 u64 seq;
134 134
135 /* we are protected by the ring emission mutex */ 135 /* we are protected by the ring emission mutex */
136 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); 136 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
@@ -138,7 +138,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
138 return -ENOMEM; 138 return -ENOMEM;
139 } 139 }
140 (*fence)->rdev = rdev; 140 (*fence)->rdev = rdev;
141 (*fence)->seq = seq; 141 (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
142 (*fence)->ring = ring; 142 (*fence)->ring = ring;
143 (*fence)->is_vm_update = false; 143 (*fence)->is_vm_update = false;
144 fence_init(&(*fence)->base, &radeon_fence_ops, 144 fence_init(&(*fence)->base, &radeon_fence_ops,
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
deleted file mode 100644
index 688afb62f7c4..000000000000
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ /dev/null
@@ -1,402 +0,0 @@
1/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
2/*
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Michel Dänzer <michel@daenzer.net>
31 *
32 * ------------------------ This file is DEPRECATED! -------------------------
33 */
34
35#include <drm/drmP.h>
36#include <drm/radeon_drm.h>
37#include "radeon_drv.h"
38
39void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
40{
41 drm_radeon_private_t *dev_priv = dev->dev_private;
42
43 if (state)
44 dev_priv->irq_enable_reg |= mask;
45 else
46 dev_priv->irq_enable_reg &= ~mask;
47
48 if (dev->irq_enabled)
49 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
50}
51
52static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
53{
54 drm_radeon_private_t *dev_priv = dev->dev_private;
55
56 if (state)
57 dev_priv->r500_disp_irq_reg |= mask;
58 else
59 dev_priv->r500_disp_irq_reg &= ~mask;
60
61 if (dev->irq_enabled)
62 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
63}
64
65int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe)
66{
67 drm_radeon_private_t *dev_priv = dev->dev_private;
68
69 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
70 switch (pipe) {
71 case 0:
72 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
73 break;
74 case 1:
75 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
76 break;
77 default:
78 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
79 pipe);
80 return -EINVAL;
81 }
82 } else {
83 switch (pipe) {
84 case 0:
85 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
86 break;
87 case 1:
88 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
89 break;
90 default:
91 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
92 pipe);
93 return -EINVAL;
94 }
95 }
96
97 return 0;
98}
99
100void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe)
101{
102 drm_radeon_private_t *dev_priv = dev->dev_private;
103
104 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
105 switch (pipe) {
106 case 0:
107 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
108 break;
109 case 1:
110 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
111 break;
112 default:
113 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
114 pipe);
115 break;
116 }
117 } else {
118 switch (pipe) {
119 case 0:
120 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
121 break;
122 case 1:
123 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
124 break;
125 default:
126 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
127 pipe);
128 break;
129 }
130 }
131}
132
133static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
134{
135 u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
136 u32 irq_mask = RADEON_SW_INT_TEST;
137
138 *r500_disp_int = 0;
139 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
140 /* vbl interrupts in a different place */
141
142 if (irqs & R500_DISPLAY_INT_STATUS) {
143 /* if a display interrupt */
144 u32 disp_irq;
145
146 disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
147
148 *r500_disp_int = disp_irq;
149 if (disp_irq & R500_D1_VBLANK_INTERRUPT)
150 RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
151 if (disp_irq & R500_D2_VBLANK_INTERRUPT)
152 RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
153 }
154 irq_mask |= R500_DISPLAY_INT_STATUS;
155 } else
156 irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
157
158 irqs &= irq_mask;
159
160 if (irqs)
161 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
162
163 return irqs;
164}
165
166/* Interrupts - Used for device synchronization and flushing in the
167 * following circumstances:
168 *
169 * - Exclusive FB access with hw idle:
170 * - Wait for GUI Idle (?) interrupt, then do normal flush.
171 *
172 * - Frame throttling, NV_fence:
173 * - Drop marker irq's into command stream ahead of time.
174 * - Wait on irq's with lock *not held*
175 * - Check each for termination condition
176 *
177 * - Internally in cp_getbuffer, etc:
178 * - as above, but wait with lock held???
179 *
180 * NOTE: These functions are misleadingly named -- the irq's aren't
181 * tied to dma at all, this is just a hangover from dri prehistory.
182 */
183
184irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
185{
186 struct drm_device *dev = (struct drm_device *) arg;
187 drm_radeon_private_t *dev_priv =
188 (drm_radeon_private_t *) dev->dev_private;
189 u32 stat;
190 u32 r500_disp_int;
191
192 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
193 return IRQ_NONE;
194
195 /* Only consider the bits we're interested in - others could be used
196 * outside the DRM
197 */
198 stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
199 if (!stat)
200 return IRQ_NONE;
201
202 stat &= dev_priv->irq_enable_reg;
203
204 /* SW interrupt */
205 if (stat & RADEON_SW_INT_TEST)
206 wake_up(&dev_priv->swi_queue);
207
208 /* VBLANK interrupt */
209 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
210 if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
211 drm_handle_vblank(dev, 0);
212 if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
213 drm_handle_vblank(dev, 1);
214 } else {
215 if (stat & RADEON_CRTC_VBLANK_STAT)
216 drm_handle_vblank(dev, 0);
217 if (stat & RADEON_CRTC2_VBLANK_STAT)
218 drm_handle_vblank(dev, 1);
219 }
220 return IRQ_HANDLED;
221}
222
223static int radeon_emit_irq(struct drm_device * dev)
224{
225 drm_radeon_private_t *dev_priv = dev->dev_private;
226 unsigned int ret;
227 RING_LOCALS;
228
229 atomic_inc(&dev_priv->swi_emitted);
230 ret = atomic_read(&dev_priv->swi_emitted);
231
232 BEGIN_RING(4);
233 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
234 OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
235 ADVANCE_RING();
236 COMMIT_RING();
237
238 return ret;
239}
240
241static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
242{
243 drm_radeon_private_t *dev_priv =
244 (drm_radeon_private_t *) dev->dev_private;
245 int ret = 0;
246
247 if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
248 return 0;
249
250 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
251
252 DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ,
253 RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
254
255 return ret;
256}
257
258u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
259{
260 drm_radeon_private_t *dev_priv = dev->dev_private;
261
262 if (!dev_priv) {
263 DRM_ERROR("called with no initialization\n");
264 return -EINVAL;
265 }
266
267 if (pipe > 1) {
268 DRM_ERROR("Invalid crtc %u\n", pipe);
269 return -EINVAL;
270 }
271
272 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
273 if (pipe == 0)
274 return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
275 else
276 return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
277 } else {
278 if (pipe == 0)
279 return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
280 else
281 return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
282 }
283}
284
285/* Needs the lock as it touches the ring.
286 */
287int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
288{
289 drm_radeon_private_t *dev_priv = dev->dev_private;
290 drm_radeon_irq_emit_t *emit = data;
291 int result;
292
293 if (!dev_priv) {
294 DRM_ERROR("called with no initialization\n");
295 return -EINVAL;
296 }
297
298 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
299 return -EINVAL;
300
301 LOCK_TEST_WITH_RETURN(dev, file_priv);
302
303 result = radeon_emit_irq(dev);
304
305 if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
306 DRM_ERROR("copy_to_user\n");
307 return -EFAULT;
308 }
309
310 return 0;
311}
312
313/* Doesn't need the hardware lock.
314 */
315int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
316{
317 drm_radeon_private_t *dev_priv = dev->dev_private;
318 drm_radeon_irq_wait_t *irqwait = data;
319
320 if (!dev_priv) {
321 DRM_ERROR("called with no initialization\n");
322 return -EINVAL;
323 }
324
325 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
326 return -EINVAL;
327
328 return radeon_wait_irq(dev, irqwait->irq_seq);
329}
330
331/* drm_dma.h hooks
332*/
333void radeon_driver_irq_preinstall(struct drm_device * dev)
334{
335 drm_radeon_private_t *dev_priv =
336 (drm_radeon_private_t *) dev->dev_private;
337 u32 dummy;
338
339 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
340 return;
341
342 /* Disable *all* interrupts */
343 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
344 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
345 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
346
347 /* Clear bits if they're already high */
348 radeon_acknowledge_irqs(dev_priv, &dummy);
349}
350
351int radeon_driver_irq_postinstall(struct drm_device *dev)
352{
353 drm_radeon_private_t *dev_priv =
354 (drm_radeon_private_t *) dev->dev_private;
355
356 atomic_set(&dev_priv->swi_emitted, 0);
357 init_waitqueue_head(&dev_priv->swi_queue);
358
359 dev->max_vblank_count = 0x001fffff;
360
361 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
362 return 0;
363
364 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
365
366 return 0;
367}
368
369void radeon_driver_irq_uninstall(struct drm_device * dev)
370{
371 drm_radeon_private_t *dev_priv =
372 (drm_radeon_private_t *) dev->dev_private;
373 if (!dev_priv)
374 return;
375
376 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
377 return;
378
379 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
380 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
381 /* Disable *all* interrupts */
382 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
383}
384
385
386int radeon_vblank_crtc_get(struct drm_device *dev)
387{
388 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
389
390 return dev_priv->vblank_crtc;
391}
392
393int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
394{
395 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
396 if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
397 DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
398 return -EINVAL;
399 }
400 dev_priv->vblank_crtc = (unsigned int)value;
401 return 0;
402}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d290a8a09036..414953c46a38 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -748,19 +748,19 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
748 * radeon_get_vblank_counter_kms - get frame count 748 * radeon_get_vblank_counter_kms - get frame count
749 * 749 *
750 * @dev: drm dev pointer 750 * @dev: drm dev pointer
751 * @crtc: crtc to get the frame count from 751 * @pipe: crtc to get the frame count from
752 * 752 *
753 * Gets the frame count on the requested crtc (all asics). 753 * Gets the frame count on the requested crtc (all asics).
754 * Returns frame count on success, -EINVAL on failure. 754 * Returns frame count on success, -EINVAL on failure.
755 */ 755 */
756u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) 756u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
757{ 757{
758 int vpos, hpos, stat; 758 int vpos, hpos, stat;
759 u32 count; 759 u32 count;
760 struct radeon_device *rdev = dev->dev_private; 760 struct radeon_device *rdev = dev->dev_private;
761 761
762 if (crtc < 0 || crtc >= rdev->num_crtc) { 762 if (pipe >= rdev->num_crtc) {
763 DRM_ERROR("Invalid crtc %d\n", crtc); 763 DRM_ERROR("Invalid crtc %u\n", pipe);
764 return -EINVAL; 764 return -EINVAL;
765 } 765 }
766 766
@@ -772,29 +772,29 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
772 * and start of vsync, so vpos >= 0 means to bump the hw frame counter 772 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
773 * result by 1 to give the proper appearance to caller. 773 * result by 1 to give the proper appearance to caller.
774 */ 774 */
775 if (rdev->mode_info.crtcs[crtc]) { 775 if (rdev->mode_info.crtcs[pipe]) {
776 /* Repeat readout if needed to provide stable result if 776 /* Repeat readout if needed to provide stable result if
777 * we cross start of vsync during the queries. 777 * we cross start of vsync during the queries.
778 */ 778 */
779 do { 779 do {
780 count = radeon_get_vblank_counter(rdev, crtc); 780 count = radeon_get_vblank_counter(rdev, pipe);
781 /* Ask radeon_get_crtc_scanoutpos to return vpos as 781 /* Ask radeon_get_crtc_scanoutpos to return vpos as
782 * distance to start of vblank, instead of regular 782 * distance to start of vblank, instead of regular
783 * vertical scanout pos. 783 * vertical scanout pos.
784 */ 784 */
785 stat = radeon_get_crtc_scanoutpos( 785 stat = radeon_get_crtc_scanoutpos(
786 dev, crtc, GET_DISTANCE_TO_VBLANKSTART, 786 dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
787 &vpos, &hpos, NULL, NULL, 787 &vpos, &hpos, NULL, NULL,
788 &rdev->mode_info.crtcs[crtc]->base.hwmode); 788 &rdev->mode_info.crtcs[pipe]->base.hwmode);
789 } while (count != radeon_get_vblank_counter(rdev, crtc)); 789 } while (count != radeon_get_vblank_counter(rdev, pipe));
790 790
791 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != 791 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
792 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { 792 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
793 DRM_DEBUG_VBL("Query failed! stat %d\n", stat); 793 DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
794 } 794 }
795 else { 795 else {
796 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", 796 DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
797 crtc, vpos); 797 pipe, vpos);
798 798
799 /* Bump counter if we are at >= leading edge of vblank, 799 /* Bump counter if we are at >= leading edge of vblank,
800 * but before vsync where vpos would turn negative and 800 * but before vsync where vpos would turn negative and
@@ -806,7 +806,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
806 } 806 }
807 else { 807 else {
808 /* Fallback to use value as is. */ 808 /* Fallback to use value as is. */
809 count = radeon_get_vblank_counter(rdev, crtc); 809 count = radeon_get_vblank_counter(rdev, pipe);
810 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); 810 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
811 } 811 }
812 812
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 678b4386540d..32b338ff436b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -25,6 +25,7 @@
25 */ 25 */
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/drm_fb_helper.h>
28#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
29#include <drm/drm_fixed.h> 30#include <drm/drm_fixed.h>
30#include "radeon.h" 31#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 30de43366eae..88dc973fb209 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1772,7 +1772,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
1772 switch (radeon_encoder->encoder_id) { 1772 switch (radeon_encoder->encoder_id) {
1773 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1773 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1774 encoder->possible_crtcs = 0x1; 1774 encoder->possible_crtcs = 0x1;
1775 drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); 1775 drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs,
1776 DRM_MODE_ENCODER_LVDS, NULL);
1776 drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); 1777 drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
1777 if (rdev->is_atom_bios) 1778 if (rdev->is_atom_bios)
1778 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); 1779 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
@@ -1781,12 +1782,14 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
1781 radeon_encoder->rmx_type = RMX_FULL; 1782 radeon_encoder->rmx_type = RMX_FULL;
1782 break; 1783 break;
1783 case ENCODER_OBJECT_ID_INTERNAL_TMDS1: 1784 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1784 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS); 1785 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs,
1786 DRM_MODE_ENCODER_TMDS, NULL);
1785 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); 1787 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
1786 radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder); 1788 radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder);
1787 break; 1789 break;
1788 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1790 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1789 drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC); 1791 drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs,
1792 DRM_MODE_ENCODER_DAC, NULL);
1790 drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs); 1793 drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
1791 if (rdev->is_atom_bios) 1794 if (rdev->is_atom_bios)
1792 radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder); 1795 radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
@@ -1794,7 +1797,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
1794 radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder); 1797 radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
1795 break; 1798 break;
1796 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 1799 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1797 drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC); 1800 drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs,
1801 DRM_MODE_ENCODER_TVDAC, NULL);
1798 drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs); 1802 drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
1799 if (rdev->is_atom_bios) 1803 if (rdev->is_atom_bios)
1800 radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder); 1804 radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
@@ -1802,7 +1806,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
1802 radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder); 1806 radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
1803 break; 1807 break;
1804 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1808 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1805 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); 1809 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs,
1810 DRM_MODE_ENCODER_TMDS, NULL);
1806 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); 1811 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
1807 if (!rdev->is_atom_bios) 1812 if (!rdev->is_atom_bios)
1808 radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); 1813 radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
deleted file mode 100644
index 146d253f1131..000000000000
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ /dev/null
@@ -1,302 +0,0 @@
1/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
2/*
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 * ------------------------ This file is DEPRECATED! -------------------------
32 */
33
34#include <drm/drmP.h>
35#include <drm/radeon_drm.h>
36#include "radeon_drv.h"
37
38/* Very simple allocator for GART memory, working on a static range
39 * already mapped into each client's address space.
40 */
41
42static struct mem_block *split_block(struct mem_block *p, int start, int size,
43 struct drm_file *file_priv)
44{
45 /* Maybe cut off the start of an existing block */
46 if (start > p->start) {
47 struct mem_block *newblock = kmalloc(sizeof(*newblock),
48 GFP_KERNEL);
49 if (!newblock)
50 goto out;
51 newblock->start = start;
52 newblock->size = p->size - (start - p->start);
53 newblock->file_priv = NULL;
54 newblock->next = p->next;
55 newblock->prev = p;
56 p->next->prev = newblock;
57 p->next = newblock;
58 p->size -= newblock->size;
59 p = newblock;
60 }
61
62 /* Maybe cut off the end of an existing block */
63 if (size < p->size) {
64 struct mem_block *newblock = kmalloc(sizeof(*newblock),
65 GFP_KERNEL);
66 if (!newblock)
67 goto out;
68 newblock->start = start + size;
69 newblock->size = p->size - size;
70 newblock->file_priv = NULL;
71 newblock->next = p->next;
72 newblock->prev = p;
73 p->next->prev = newblock;
74 p->next = newblock;
75 p->size = size;
76 }
77
78 out:
79 /* Our block is in the middle */
80 p->file_priv = file_priv;
81 return p;
82}
83
84static struct mem_block *alloc_block(struct mem_block *heap, int size,
85 int align2, struct drm_file *file_priv)
86{
87 struct mem_block *p;
88 int mask = (1 << align2) - 1;
89
90 list_for_each(p, heap) {
91 int start = (p->start + mask) & ~mask;
92 if (p->file_priv == NULL && start + size <= p->start + p->size)
93 return split_block(p, start, size, file_priv);
94 }
95
96 return NULL;
97}
98
99static struct mem_block *find_block(struct mem_block *heap, int start)
100{
101 struct mem_block *p;
102
103 list_for_each(p, heap)
104 if (p->start == start)
105 return p;
106
107 return NULL;
108}
109
110static void free_block(struct mem_block *p)
111{
112 p->file_priv = NULL;
113
114 /* Assumes a single contiguous range. Needs a special file_priv in
115 * 'heap' to stop it being subsumed.
116 */
117 if (p->next->file_priv == NULL) {
118 struct mem_block *q = p->next;
119 p->size += q->size;
120 p->next = q->next;
121 p->next->prev = p;
122 kfree(q);
123 }
124
125 if (p->prev->file_priv == NULL) {
126 struct mem_block *q = p->prev;
127 q->size += p->size;
128 q->next = p->next;
129 q->next->prev = q;
130 kfree(p);
131 }
132}
133
134/* Initialize. How to check for an uninitialized heap?
135 */
136static int init_heap(struct mem_block **heap, int start, int size)
137{
138 struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
139
140 if (!blocks)
141 return -ENOMEM;
142
143 *heap = kzalloc(sizeof(**heap), GFP_KERNEL);
144 if (!*heap) {
145 kfree(blocks);
146 return -ENOMEM;
147 }
148
149 blocks->start = start;
150 blocks->size = size;
151 blocks->file_priv = NULL;
152 blocks->next = blocks->prev = *heap;
153
154 (*heap)->file_priv = (struct drm_file *) - 1;
155 (*heap)->next = (*heap)->prev = blocks;
156 return 0;
157}
158
159/* Free all blocks associated with the releasing file.
160 */
161void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
162{
163 struct mem_block *p;
164
165 if (!heap || !heap->next)
166 return;
167
168 list_for_each(p, heap) {
169 if (p->file_priv == file_priv)
170 p->file_priv = NULL;
171 }
172
173 /* Assumes a single contiguous range. Needs a special file_priv in
174 * 'heap' to stop it being subsumed.
175 */
176 list_for_each(p, heap) {
177 while (p->file_priv == NULL && p->next->file_priv == NULL) {
178 struct mem_block *q = p->next;
179 p->size += q->size;
180 p->next = q->next;
181 p->next->prev = p;
182 kfree(q);
183 }
184 }
185}
186
187/* Shutdown.
188 */
189void radeon_mem_takedown(struct mem_block **heap)
190{
191 struct mem_block *p;
192
193 if (!*heap)
194 return;
195
196 for (p = (*heap)->next; p != *heap;) {
197 struct mem_block *q = p;
198 p = p->next;
199 kfree(q);
200 }
201
202 kfree(*heap);
203 *heap = NULL;
204}
205
206/* IOCTL HANDLERS */
207
208static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
209{
210 switch (region) {
211 case RADEON_MEM_REGION_GART:
212 return &dev_priv->gart_heap;
213 case RADEON_MEM_REGION_FB:
214 return &dev_priv->fb_heap;
215 default:
216 return NULL;
217 }
218}
219
220int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
221{
222 drm_radeon_private_t *dev_priv = dev->dev_private;
223 drm_radeon_mem_alloc_t *alloc = data;
224 struct mem_block *block, **heap;
225
226 if (!dev_priv) {
227 DRM_ERROR("called with no initialization\n");
228 return -EINVAL;
229 }
230
231 heap = get_heap(dev_priv, alloc->region);
232 if (!heap || !*heap)
233 return -EFAULT;
234
235 /* Make things easier on ourselves: all allocations at least
236 * 4k aligned.
237 */
238 if (alloc->alignment < 12)
239 alloc->alignment = 12;
240
241 block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
242
243 if (!block)
244 return -ENOMEM;
245
246 if (copy_to_user(alloc->region_offset, &block->start,
247 sizeof(int))) {
248 DRM_ERROR("copy_to_user\n");
249 return -EFAULT;
250 }
251
252 return 0;
253}
254
255int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
256{
257 drm_radeon_private_t *dev_priv = dev->dev_private;
258 drm_radeon_mem_free_t *memfree = data;
259 struct mem_block *block, **heap;
260
261 if (!dev_priv) {
262 DRM_ERROR("called with no initialization\n");
263 return -EINVAL;
264 }
265
266 heap = get_heap(dev_priv, memfree->region);
267 if (!heap || !*heap)
268 return -EFAULT;
269
270 block = find_block(*heap, memfree->region_offset);
271 if (!block)
272 return -EFAULT;
273
274 if (block->file_priv != file_priv)
275 return -EPERM;
276
277 free_block(block);
278 return 0;
279}
280
281int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
282{
283 drm_radeon_private_t *dev_priv = dev->dev_private;
284 drm_radeon_mem_init_heap_t *initheap = data;
285 struct mem_block **heap;
286
287 if (!dev_priv) {
288 DRM_ERROR("called with no initialization\n");
289 return -EINVAL;
290 }
291
292 heap = get_heap(dev_priv, initheap->region);
293 if (!heap)
294 return -EFAULT;
295
296 if (*heap) {
297 DRM_ERROR("heap already initialized?");
298 return -EFAULT;
299 }
300
301 return init_heap(heap, initheap->start, initheap->size);
302}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index cddd41b32eda..bb75201a24ba 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -757,8 +757,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
757extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 757extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
758extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder, 758extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
759 struct drm_connector *connector); 759 struct drm_connector *connector);
760int radeon_dp_get_max_link_rate(struct drm_connector *connector, 760extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
761 const u8 *dpcd); 761 const u8 *dpcd,
762 unsigned pix_clock,
763 unsigned *dp_lanes, unsigned *dp_rate);
762extern void radeon_dp_set_rx_power_state(struct drm_connector *connector, 764extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
763 u8 power_state); 765 u8 power_state);
764extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector); 766extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 59abebd6b5dc..460c8f2989da 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -713,7 +713,7 @@ static struct attribute *hwmon_attributes[] = {
713static umode_t hwmon_attributes_visible(struct kobject *kobj, 713static umode_t hwmon_attributes_visible(struct kobject *kobj,
714 struct attribute *attr, int index) 714 struct attribute *attr, int index)
715{ 715{
716 struct device *dev = container_of(kobj, struct device, kobj); 716 struct device *dev = kobj_to_dev(kobj);
717 struct radeon_device *rdev = dev_get_drvdata(dev); 717 struct radeon_device *rdev = dev_get_drvdata(dev);
718 umode_t effective_mode = attr->mode; 718 umode_t effective_mode = attr->mode;
719 719
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
deleted file mode 100644
index 15aee723db77..000000000000
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ /dev/null
@@ -1,3261 +0,0 @@
1/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
2/*
3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Gareth Hughes <gareth@valinux.com>
27 * Kevin E. Martin <martin@valinux.com>
28 *
29 * ------------------------ This file is DEPRECATED! -------------------------
30 */
31
32#include <drm/drmP.h>
33#include <drm/radeon_drm.h>
34#include "radeon_drv.h"
35#include "drm_buffer.h"
36
37/* ================================================================
38 * Helper functions for client state checking and fixup
39 */
40
41static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
42 dev_priv,
43 struct drm_file * file_priv,
44 u32 *offset)
45{
46 u64 off = *offset;
47 u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
48 struct drm_radeon_driver_file_fields *radeon_priv;
49
50 /* Hrm ... the story of the offset ... So this function converts
51 * the various ideas of what userland clients might have for an
52 * offset in the card address space into an offset into the card
53 * address space :) So with a sane client, it should just keep
54 * the value intact and just do some boundary checking. However,
55 * not all clients are sane. Some older clients pass us 0 based
56 * offsets relative to the start of the framebuffer and some may
57 * assume the AGP aperture it appended to the framebuffer, so we
58 * try to detect those cases and fix them up.
59 *
60 * Note: It might be a good idea here to make sure the offset lands
61 * in some "allowed" area to protect things like the PCIE GART...
62 */
63
64 /* First, the best case, the offset already lands in either the
65 * framebuffer or the GART mapped space
66 */
67 if (radeon_check_offset(dev_priv, off))
68 return 0;
69
70 /* Ok, that didn't happen... now check if we have a zero based
71 * offset that fits in the framebuffer + gart space, apply the
72 * magic offset we get from SETPARAM or calculated from fb_location
73 */
74 if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
75 radeon_priv = file_priv->driver_priv;
76 off += radeon_priv->radeon_fb_delta;
77 }
78
79 /* Finally, assume we aimed at a GART offset if beyond the fb */
80 if (off > fb_end)
81 off = off - fb_end - 1 + dev_priv->gart_vm_start;
82
83 /* Now recheck and fail if out of bounds */
84 if (radeon_check_offset(dev_priv, off)) {
85 DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
86 *offset = off;
87 return 0;
88 }
89 return -EINVAL;
90}
91
92static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
93 dev_priv,
94 struct drm_file *file_priv,
95 int id, struct drm_buffer *buf)
96{
97 u32 *data;
98 switch (id) {
99
100 case RADEON_EMIT_PP_MISC:
101 data = drm_buffer_pointer_to_dword(buf,
102 (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
103
104 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
105 DRM_ERROR("Invalid depth buffer offset\n");
106 return -EINVAL;
107 }
108 dev_priv->have_z_offset = 1;
109 break;
110
111 case RADEON_EMIT_PP_CNTL:
112 data = drm_buffer_pointer_to_dword(buf,
113 (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
114
115 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
116 DRM_ERROR("Invalid colour buffer offset\n");
117 return -EINVAL;
118 }
119 break;
120
121 case R200_EMIT_PP_TXOFFSET_0:
122 case R200_EMIT_PP_TXOFFSET_1:
123 case R200_EMIT_PP_TXOFFSET_2:
124 case R200_EMIT_PP_TXOFFSET_3:
125 case R200_EMIT_PP_TXOFFSET_4:
126 case R200_EMIT_PP_TXOFFSET_5:
127 data = drm_buffer_pointer_to_dword(buf, 0);
128 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
129 DRM_ERROR("Invalid R200 texture offset\n");
130 return -EINVAL;
131 }
132 break;
133
134 case RADEON_EMIT_PP_TXFILTER_0:
135 case RADEON_EMIT_PP_TXFILTER_1:
136 case RADEON_EMIT_PP_TXFILTER_2:
137 data = drm_buffer_pointer_to_dword(buf,
138 (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
139 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
140 DRM_ERROR("Invalid R100 texture offset\n");
141 return -EINVAL;
142 }
143 break;
144
145 case R200_EMIT_PP_CUBIC_OFFSETS_0:
146 case R200_EMIT_PP_CUBIC_OFFSETS_1:
147 case R200_EMIT_PP_CUBIC_OFFSETS_2:
148 case R200_EMIT_PP_CUBIC_OFFSETS_3:
149 case R200_EMIT_PP_CUBIC_OFFSETS_4:
150 case R200_EMIT_PP_CUBIC_OFFSETS_5:{
151 int i;
152 for (i = 0; i < 5; i++) {
153 data = drm_buffer_pointer_to_dword(buf, i);
154 if (radeon_check_and_fixup_offset(dev_priv,
155 file_priv,
156 data)) {
157 DRM_ERROR
158 ("Invalid R200 cubic texture offset\n");
159 return -EINVAL;
160 }
161 }
162 break;
163 }
164
165 case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
166 case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
167 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
168 int i;
169 for (i = 0; i < 5; i++) {
170 data = drm_buffer_pointer_to_dword(buf, i);
171 if (radeon_check_and_fixup_offset(dev_priv,
172 file_priv,
173 data)) {
174 DRM_ERROR
175 ("Invalid R100 cubic texture offset\n");
176 return -EINVAL;
177 }
178 }
179 }
180 break;
181
182 case R200_EMIT_VAP_CTL:{
183 RING_LOCALS;
184 BEGIN_RING(2);
185 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
186 ADVANCE_RING();
187 }
188 break;
189
190 case RADEON_EMIT_RB3D_COLORPITCH:
191 case RADEON_EMIT_RE_LINE_PATTERN:
192 case RADEON_EMIT_SE_LINE_WIDTH:
193 case RADEON_EMIT_PP_LUM_MATRIX:
194 case RADEON_EMIT_PP_ROT_MATRIX_0:
195 case RADEON_EMIT_RB3D_STENCILREFMASK:
196 case RADEON_EMIT_SE_VPORT_XSCALE:
197 case RADEON_EMIT_SE_CNTL:
198 case RADEON_EMIT_SE_CNTL_STATUS:
199 case RADEON_EMIT_RE_MISC:
200 case RADEON_EMIT_PP_BORDER_COLOR_0:
201 case RADEON_EMIT_PP_BORDER_COLOR_1:
202 case RADEON_EMIT_PP_BORDER_COLOR_2:
203 case RADEON_EMIT_SE_ZBIAS_FACTOR:
204 case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
205 case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
206 case R200_EMIT_PP_TXCBLEND_0:
207 case R200_EMIT_PP_TXCBLEND_1:
208 case R200_EMIT_PP_TXCBLEND_2:
209 case R200_EMIT_PP_TXCBLEND_3:
210 case R200_EMIT_PP_TXCBLEND_4:
211 case R200_EMIT_PP_TXCBLEND_5:
212 case R200_EMIT_PP_TXCBLEND_6:
213 case R200_EMIT_PP_TXCBLEND_7:
214 case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
215 case R200_EMIT_TFACTOR_0:
216 case R200_EMIT_VTX_FMT_0:
217 case R200_EMIT_MATRIX_SELECT_0:
218 case R200_EMIT_TEX_PROC_CTL_2:
219 case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
220 case R200_EMIT_PP_TXFILTER_0:
221 case R200_EMIT_PP_TXFILTER_1:
222 case R200_EMIT_PP_TXFILTER_2:
223 case R200_EMIT_PP_TXFILTER_3:
224 case R200_EMIT_PP_TXFILTER_4:
225 case R200_EMIT_PP_TXFILTER_5:
226 case R200_EMIT_VTE_CNTL:
227 case R200_EMIT_OUTPUT_VTX_COMP_SEL:
228 case R200_EMIT_PP_TAM_DEBUG3:
229 case R200_EMIT_PP_CNTL_X:
230 case R200_EMIT_RB3D_DEPTHXY_OFFSET:
231 case R200_EMIT_RE_AUX_SCISSOR_CNTL:
232 case R200_EMIT_RE_SCISSOR_TL_0:
233 case R200_EMIT_RE_SCISSOR_TL_1:
234 case R200_EMIT_RE_SCISSOR_TL_2:
235 case R200_EMIT_SE_VAP_CNTL_STATUS:
236 case R200_EMIT_SE_VTX_STATE_CNTL:
237 case R200_EMIT_RE_POINTSIZE:
238 case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
239 case R200_EMIT_PP_CUBIC_FACES_0:
240 case R200_EMIT_PP_CUBIC_FACES_1:
241 case R200_EMIT_PP_CUBIC_FACES_2:
242 case R200_EMIT_PP_CUBIC_FACES_3:
243 case R200_EMIT_PP_CUBIC_FACES_4:
244 case R200_EMIT_PP_CUBIC_FACES_5:
245 case RADEON_EMIT_PP_TEX_SIZE_0:
246 case RADEON_EMIT_PP_TEX_SIZE_1:
247 case RADEON_EMIT_PP_TEX_SIZE_2:
248 case R200_EMIT_RB3D_BLENDCOLOR:
249 case R200_EMIT_TCL_POINT_SPRITE_CNTL:
250 case RADEON_EMIT_PP_CUBIC_FACES_0:
251 case RADEON_EMIT_PP_CUBIC_FACES_1:
252 case RADEON_EMIT_PP_CUBIC_FACES_2:
253 case R200_EMIT_PP_TRI_PERF_CNTL:
254 case R200_EMIT_PP_AFS_0:
255 case R200_EMIT_PP_AFS_1:
256 case R200_EMIT_ATF_TFACTOR:
257 case R200_EMIT_PP_TXCTLALL_0:
258 case R200_EMIT_PP_TXCTLALL_1:
259 case R200_EMIT_PP_TXCTLALL_2:
260 case R200_EMIT_PP_TXCTLALL_3:
261 case R200_EMIT_PP_TXCTLALL_4:
262 case R200_EMIT_PP_TXCTLALL_5:
263 case R200_EMIT_VAP_PVS_CNTL:
264 /* These packets don't contain memory offsets */
265 break;
266
267 default:
268 DRM_ERROR("Unknown state packet ID %d\n", id);
269 return -EINVAL;
270 }
271
272 return 0;
273}
274
275static int radeon_check_and_fixup_packet3(drm_radeon_private_t *
276 dev_priv,
277 struct drm_file *file_priv,
278 drm_radeon_kcmd_buffer_t *
279 cmdbuf,
280 unsigned int *cmdsz)
281{
282 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
283 u32 offset, narrays;
284 int count, i, k;
285
286 count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
287 *cmdsz = 2 + count;
288
289 if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
290 DRM_ERROR("Not a type 3 packet\n");
291 return -EINVAL;
292 }
293
294 if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
295 DRM_ERROR("Packet size larger than size of data provided\n");
296 return -EINVAL;
297 }
298
299 switch (*cmd & 0xff00) {
300 /* XXX Are there old drivers needing other packets? */
301
302 case RADEON_3D_DRAW_IMMD:
303 case RADEON_3D_DRAW_VBUF:
304 case RADEON_3D_DRAW_INDX:
305 case RADEON_WAIT_FOR_IDLE:
306 case RADEON_CP_NOP:
307 case RADEON_3D_CLEAR_ZMASK:
308/* case RADEON_CP_NEXT_CHAR:
309 case RADEON_CP_PLY_NEXTSCAN:
310 case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
311 /* these packets are safe */
312 break;
313
314 case RADEON_CP_3D_DRAW_IMMD_2:
315 case RADEON_CP_3D_DRAW_VBUF_2:
316 case RADEON_CP_3D_DRAW_INDX_2:
317 case RADEON_3D_CLEAR_HIZ:
318 /* safe but r200 only */
319 if (dev_priv->microcode_version != UCODE_R200) {
320 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
321 return -EINVAL;
322 }
323 break;
324
325 case RADEON_3D_LOAD_VBPNTR:
326
327 if (count > 18) { /* 12 arrays max */
328 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
329 count);
330 return -EINVAL;
331 }
332
333 /* carefully check packet contents */
334 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
335
336 narrays = *cmd & ~0xc000;
337 k = 0;
338 i = 2;
339 while ((k < narrays) && (i < (count + 2))) {
340 i++; /* skip attribute field */
341 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
342 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
343 cmd)) {
344 DRM_ERROR
345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
346 k, i);
347 return -EINVAL;
348 }
349 k++;
350 i++;
351 if (k == narrays)
352 break;
353 /* have one more to process, they come in pairs */
354 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
355
356 if (radeon_check_and_fixup_offset(dev_priv,
357 file_priv, cmd))
358 {
359 DRM_ERROR
360 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
361 k, i);
362 return -EINVAL;
363 }
364 k++;
365 i++;
366 }
367 /* do the counts match what we expect ? */
368 if ((k != narrays) || (i != (count + 2))) {
369 DRM_ERROR
370 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
371 k, i, narrays, count + 1);
372 return -EINVAL;
373 }
374 break;
375
376 case RADEON_3D_RNDR_GEN_INDX_PRIM:
377 if (dev_priv->microcode_version != UCODE_R100) {
378 DRM_ERROR("Invalid 3d packet for r200-class chip\n");
379 return -EINVAL;
380 }
381
382 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
383 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
384 DRM_ERROR("Invalid rndr_gen_indx offset\n");
385 return -EINVAL;
386 }
387 break;
388
389 case RADEON_CP_INDX_BUFFER:
390 if (dev_priv->microcode_version != UCODE_R200) {
391 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
392 return -EINVAL;
393 }
394
395 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
396 if ((*cmd & 0x8000ffff) != 0x80000810) {
397 DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
398 return -EINVAL;
399 }
400 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
401 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
402 DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
403 return -EINVAL;
404 }
405 break;
406
407 case RADEON_CNTL_HOSTDATA_BLT:
408 case RADEON_CNTL_PAINT_MULTI:
409 case RADEON_CNTL_BITBLT_MULTI:
410 /* MSB of opcode: next DWORD GUI_CNTL */
411 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
412 if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
413 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
414 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
415 offset = *cmd2 << 10;
416 if (radeon_check_and_fixup_offset
417 (dev_priv, file_priv, &offset)) {
418 DRM_ERROR("Invalid first packet offset\n");
419 return -EINVAL;
420 }
421 *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
422 }
423
424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
427 offset = *cmd3 << 10;
428 if (radeon_check_and_fixup_offset
429 (dev_priv, file_priv, &offset)) {
430 DRM_ERROR("Invalid second packet offset\n");
431 return -EINVAL;
432 }
433 *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
434 }
435 break;
436
437 default:
438 DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
439 return -EINVAL;
440 }
441
442 return 0;
443}
444
445/* ================================================================
446 * CP hardware state programming functions
447 */
448
449static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
450 struct drm_clip_rect * box)
451{
452 RING_LOCALS;
453
454 DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n",
455 box->x1, box->y1, box->x2, box->y2);
456
457 BEGIN_RING(4);
458 OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
459 OUT_RING((box->y1 << 16) | box->x1);
460 OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
461 OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
462 ADVANCE_RING();
463}
464
465/* Emit 1.1 state
466 */
467static int radeon_emit_state(drm_radeon_private_t * dev_priv,
468 struct drm_file *file_priv,
469 drm_radeon_context_regs_t * ctx,
470 drm_radeon_texture_regs_t * tex,
471 unsigned int dirty)
472{
473 RING_LOCALS;
474 DRM_DEBUG("dirty=0x%08x\n", dirty);
475
476 if (dirty & RADEON_UPLOAD_CONTEXT) {
477 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
478 &ctx->rb3d_depthoffset)) {
479 DRM_ERROR("Invalid depth buffer offset\n");
480 return -EINVAL;
481 }
482
483 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
484 &ctx->rb3d_coloroffset)) {
485 DRM_ERROR("Invalid depth buffer offset\n");
486 return -EINVAL;
487 }
488
489 BEGIN_RING(14);
490 OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
491 OUT_RING(ctx->pp_misc);
492 OUT_RING(ctx->pp_fog_color);
493 OUT_RING(ctx->re_solid_color);
494 OUT_RING(ctx->rb3d_blendcntl);
495 OUT_RING(ctx->rb3d_depthoffset);
496 OUT_RING(ctx->rb3d_depthpitch);
497 OUT_RING(ctx->rb3d_zstencilcntl);
498 OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
499 OUT_RING(ctx->pp_cntl);
500 OUT_RING(ctx->rb3d_cntl);
501 OUT_RING(ctx->rb3d_coloroffset);
502 OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
503 OUT_RING(ctx->rb3d_colorpitch);
504 ADVANCE_RING();
505 }
506
507 if (dirty & RADEON_UPLOAD_VERTFMT) {
508 BEGIN_RING(2);
509 OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
510 OUT_RING(ctx->se_coord_fmt);
511 ADVANCE_RING();
512 }
513
514 if (dirty & RADEON_UPLOAD_LINE) {
515 BEGIN_RING(5);
516 OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
517 OUT_RING(ctx->re_line_pattern);
518 OUT_RING(ctx->re_line_state);
519 OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
520 OUT_RING(ctx->se_line_width);
521 ADVANCE_RING();
522 }
523
524 if (dirty & RADEON_UPLOAD_BUMPMAP) {
525 BEGIN_RING(5);
526 OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
527 OUT_RING(ctx->pp_lum_matrix);
528 OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
529 OUT_RING(ctx->pp_rot_matrix_0);
530 OUT_RING(ctx->pp_rot_matrix_1);
531 ADVANCE_RING();
532 }
533
534 if (dirty & RADEON_UPLOAD_MASKS) {
535 BEGIN_RING(4);
536 OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
537 OUT_RING(ctx->rb3d_stencilrefmask);
538 OUT_RING(ctx->rb3d_ropcntl);
539 OUT_RING(ctx->rb3d_planemask);
540 ADVANCE_RING();
541 }
542
543 if (dirty & RADEON_UPLOAD_VIEWPORT) {
544 BEGIN_RING(7);
545 OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
546 OUT_RING(ctx->se_vport_xscale);
547 OUT_RING(ctx->se_vport_xoffset);
548 OUT_RING(ctx->se_vport_yscale);
549 OUT_RING(ctx->se_vport_yoffset);
550 OUT_RING(ctx->se_vport_zscale);
551 OUT_RING(ctx->se_vport_zoffset);
552 ADVANCE_RING();
553 }
554
555 if (dirty & RADEON_UPLOAD_SETUP) {
556 BEGIN_RING(4);
557 OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
558 OUT_RING(ctx->se_cntl);
559 OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
560 OUT_RING(ctx->se_cntl_status);
561 ADVANCE_RING();
562 }
563
564 if (dirty & RADEON_UPLOAD_MISC) {
565 BEGIN_RING(2);
566 OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
567 OUT_RING(ctx->re_misc);
568 ADVANCE_RING();
569 }
570
571 if (dirty & RADEON_UPLOAD_TEX0) {
572 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
573 &tex[0].pp_txoffset)) {
574 DRM_ERROR("Invalid texture offset for unit 0\n");
575 return -EINVAL;
576 }
577
578 BEGIN_RING(9);
579 OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
580 OUT_RING(tex[0].pp_txfilter);
581 OUT_RING(tex[0].pp_txformat);
582 OUT_RING(tex[0].pp_txoffset);
583 OUT_RING(tex[0].pp_txcblend);
584 OUT_RING(tex[0].pp_txablend);
585 OUT_RING(tex[0].pp_tfactor);
586 OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
587 OUT_RING(tex[0].pp_border_color);
588 ADVANCE_RING();
589 }
590
591 if (dirty & RADEON_UPLOAD_TEX1) {
592 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
593 &tex[1].pp_txoffset)) {
594 DRM_ERROR("Invalid texture offset for unit 1\n");
595 return -EINVAL;
596 }
597
598 BEGIN_RING(9);
599 OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
600 OUT_RING(tex[1].pp_txfilter);
601 OUT_RING(tex[1].pp_txformat);
602 OUT_RING(tex[1].pp_txoffset);
603 OUT_RING(tex[1].pp_txcblend);
604 OUT_RING(tex[1].pp_txablend);
605 OUT_RING(tex[1].pp_tfactor);
606 OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
607 OUT_RING(tex[1].pp_border_color);
608 ADVANCE_RING();
609 }
610
611 if (dirty & RADEON_UPLOAD_TEX2) {
612 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
613 &tex[2].pp_txoffset)) {
614 DRM_ERROR("Invalid texture offset for unit 2\n");
615 return -EINVAL;
616 }
617
618 BEGIN_RING(9);
619 OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
620 OUT_RING(tex[2].pp_txfilter);
621 OUT_RING(tex[2].pp_txformat);
622 OUT_RING(tex[2].pp_txoffset);
623 OUT_RING(tex[2].pp_txcblend);
624 OUT_RING(tex[2].pp_txablend);
625 OUT_RING(tex[2].pp_tfactor);
626 OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
627 OUT_RING(tex[2].pp_border_color);
628 ADVANCE_RING();
629 }
630
631 return 0;
632}
633
634/* Emit 1.2 state
635 */
636static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
637 struct drm_file *file_priv,
638 drm_radeon_state_t * state)
639{
640 RING_LOCALS;
641
642 if (state->dirty & RADEON_UPLOAD_ZBIAS) {
643 BEGIN_RING(3);
644 OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
645 OUT_RING(state->context2.se_zbias_factor);
646 OUT_RING(state->context2.se_zbias_constant);
647 ADVANCE_RING();
648 }
649
650 return radeon_emit_state(dev_priv, file_priv, &state->context,
651 state->tex, state->dirty);
652}
653
654/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in
655 * 1.3 cmdbuffers allow all previous state to be updated as well as
656 * the tcl scalar and vector areas.
657 */
/* Table of the register "packets" a 1.3+ command buffer may contain:
 * the first register each packet writes, how many registers it covers,
 * and a human-readable name for error reporting.  Sized by
 * RADEON_MAX_STATE_PACKETS; presumably indexed by the packet id taken
 * from the userspace command stream -- verify against the cmdbuf
 * parser (outside this chunk).
 */
static struct {
	int start;		/* first register in the range */
	int len;		/* number of registers in the packet */
	const char *name;	/* name used in debug/error messages */
} packet[RADEON_MAX_STATE_PACKETS] = {
	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
		    "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
	 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"},	/* 62 */
	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},	/* 85 */
	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
	{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
};
762
763/* ================================================================
764 * Performance monitoring functions
765 */
766
/* Fill a small rectangle in the currently visible buffer with a solid
 * color -- used to draw the on-screen performance-monitoring boxes.
 * (x, y) is taken relative to the first cliprect; r/g/b are 8-bit
 * color components packed to match the framebuffer format.
 */
static void radeon_clear_box(drm_radeon_private_t * dev_priv,
			     struct drm_radeon_master_private *master_priv,
			     int x, int y, int w, int h, int r, int g, int b)
{
	u32 color;
	RING_LOCALS;

	/* Position the box relative to the first clip rectangle */
	x += master_priv->sarea_priv->boxes[0].x1;
	y += master_priv->sarea_priv->boxes[0].y1;

	/* Pack the color according to the color buffer format */
	switch (dev_priv->color_fmt) {
	case RADEON_COLOR_FORMAT_RGB565:
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case RADEON_COLOR_FORMAT_ARGB8888:
	default:
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	}

	/* Let the 3D stream drain, then open up the write mask so the
	 * 2D fill touches every plane.
	 */
	BEGIN_RING(4);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
	OUT_RING(0xffffffff);
	ADVANCE_RING();

	BEGIN_RING(6);

	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		 RADEON_GMC_BRUSH_SOLID_COLOR |
		 (dev_priv->color_fmt << 8) |
		 RADEON_GMC_SRC_DATATYPE_COLOR |
		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);

	/* Pick front or back buffer depending on the page flip state */
	if (master_priv->sarea_priv->pfCurrentPage == 1) {
		OUT_RING(dev_priv->front_pitch_offset);
	} else {
		OUT_RING(dev_priv->back_pitch_offset);
	}

	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}
816
817static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv)
818{
819 /* Collapse various things into a wait flag -- trying to
820 * guess if userspase slept -- better just to have them tell us.
821 */
822 if (dev_priv->stats.last_frame_reads > 1 ||
823 dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
824 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
825 }
826
827 if (dev_priv->stats.freelist_loops) {
828 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
829 }
830
831 /* Purple box for page flipping
832 */
833 if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
834 radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
835
836 /* Red box if we have to wait for idle at any point
837 */
838 if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
839 radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
840
841 /* Blue box: lost context?
842 */
843
844 /* Yellow box for texture swaps
845 */
846 if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
847 radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
848
849 /* Green box if hardware never idles (as far as we can tell)
850 */
851 if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
852 radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
853
854 /* Draw bars indicating number of buffers allocated
855 * (not a great measure, easily confused)
856 */
857 if (dev_priv->stats.requested_bufs) {
858 if (dev_priv->stats.requested_bufs > 100)
859 dev_priv->stats.requested_bufs = 100;
860
861 radeon_clear_box(dev_priv, master_priv, 4, 16,
862 dev_priv->stats.requested_bufs, 4,
863 196, 128, 128);
864 }
865
866 memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
867
868}
869
870/* ================================================================
871 * CP command dispatch functions
872 */
873
/* Clear the requested buffers for every cliprect.  Color buffers
 * (front/back) are cleared with 2D solid fills; depth/stencil is
 * cleared either through the reverse-engineered hyper-z fast path or
 * by rendering a quad that writes only the depth/stencil planes
 * (separate register setup for R200 vs. older chips).
 */
static void radeon_cp_dispatch_clear(struct drm_device * dev,
				     struct drm_master *master,
				     drm_radeon_clear_t * clear,
				     drm_radeon_clear_rect_t * depth_boxes)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
	int i;
	RING_LOCALS;
	DRM_DEBUG("flags = 0x%x\n", flags);

	dev_priv->stats.clears++;

	/* After a page flip the front and back buffers swap roles, so
	 * exchange the FRONT/BACK clear flags to match.
	 */
	if (sarea_priv->pfCurrentPage == 1) {
		unsigned int tmp = flags;

		flags &= ~(RADEON_FRONT | RADEON_BACK);
		if (tmp & RADEON_FRONT)
			flags |= RADEON_BACK;
		if (tmp & RADEON_BACK)
			flags |= RADEON_FRONT;
	}
	/* Refuse depth/stencil clears until a depth buffer offset has
	 * been supplied (old, buggy Mesa versions didn't provide one).
	 */
	if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
		if (!dev_priv->have_z_offset) {
			printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
			flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
		}
	}

	if (flags & (RADEON_FRONT | RADEON_BACK)) {

		BEGIN_RING(4);

		/* Ensure the 3D stream is idle before doing a
		 * 2D fill to clear the front or back buffer.
		 */
		RADEON_WAIT_UNTIL_3D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
		OUT_RING(clear->color_mask);

		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		sarea_priv->ctx_owner = 0;

		/* One solid fill per cliprect, per requested buffer */
		for (i = 0; i < nbox; i++) {
			int x = pbox[i].x1;
			int y = pbox[i].y1;
			int w = pbox[i].x2 - x;
			int h = pbox[i].y2 - y;

			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
				  x, y, w, h, flags);

			if (flags & RADEON_FRONT) {
				BEGIN_RING(6);

				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->
					  color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->front_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);

				ADVANCE_RING();
			}

			if (flags & RADEON_BACK) {
				BEGIN_RING(6);

				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->
					  color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->back_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);

				ADVANCE_RING();
			}
		}
	}

	/* hyper z clear */
	/* no docs available, based on reverse engineering by Stephane Marchesin */
	if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
	    && (flags & RADEON_CLEAR_FASTZ)) {

		int i;
		/* depth-buffer pixels per scanline, derived from the
		 * pitch in bytes and the depth format (16 vs 32 bpp)
		 */
		int depthpixperline =
		    dev_priv->depth_fmt ==
		    RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch /
						       2) : (dev_priv->
							     depth_pitch / 4);

		u32 clearmask;

		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
		    ((clear->depth_mask & 0xff) << 24);

		/* Make sure we restore the 3D state next time.
		 * we haven't touched any "normal" state - still need this?
		 */
		sarea_priv->ctx_owner = 0;

		if ((dev_priv->flags & RADEON_HAS_HIERZ)
		    && (flags & RADEON_USE_HIERZ)) {
			/* FIXME : reverse engineer that for Rx00 cards */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
			/* pattern seems to work for r100, though get slight
			   rendering errors with glxgears. If hierz is not enabled for r100,
			   only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
			   other ones are ignored, and the same clear mask can be used. That's
			   very different behaviour than R200 which needs different clear mask
			   and different number of tiles to clear if hierz is enabled or not !?!
			 */
			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
		} else {
			/* clear mask : chooses the clearing pattern.
			   rv250: could be used to clear only parts of macrotiles
			   (but that would get really complicated...)?
			   bit 0 and 1 (either or both of them ?!?!) are used to
			   not clear tile (or maybe one of the bits indicates if the tile is
			   compressed or not), bit 2 and 3 to not clear tile 1,...,.
			   Pattern is as follows:
			   | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
			   bits -------------------------------------------------
			   | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
			   covers 256 pixels ?!?
			 */
			clearmask = 0x0;
		}

		BEGIN_RING(8);
		RADEON_WAIT_UNTIL_2D_IDLE();
		OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
			     tempRB3D_DEPTHCLEARVALUE);
		/* what offset is this exactly ? */
		OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
		/* need ctlstat, otherwise get some strange black flickering */
		OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
			     RADEON_RB3D_ZC_FLUSH_ALL);
		ADVANCE_RING();

		/* Emit z-mask clears per cliprect; the tile geometry
		 * differs between r100-with-hierz, r200, and rv100.
		 */
		for (i = 0; i < nbox; i++) {
			int tileoffset, nrtilesx, nrtilesy, j;
			/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
			if ((dev_priv->flags & RADEON_HAS_HIERZ)
			    && !(dev_priv->microcode_version == UCODE_R200)) {
				/* FIXME : figure this out for r200 (when hierz is enabled). Or
				   maybe r200 actually doesn't need to put the low-res z value into
				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
				   Works for R100, both with hierz and without.
				   R100 seems to operate on 2x1 8x8 tiles, but...
				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
				   problematic with resolutions which are not 64 pix aligned? */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					OUT_RING(tileoffset * 8);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			} else if (dev_priv->microcode_version == UCODE_R200) {
				/* works for rv250. */
				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 5;
				nrtilesx =
				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					/* judging by the first tile offset needed, could possibly
					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
					   macro tiles, though would still need clear mask for
					   right/bottom if truly 4x4 granularity is desired ? */
					OUT_RING(tileoffset * 16);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 1);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 5;
				}
			} else {	/* rv 100 */
				/* rv100 might not need 64 pix alignment, who knows */
				/* offsets are, hmm, weird */
				tileoffset =
				    ((pbox[i].y1 >> 4) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					OUT_RING(tileoffset * 128);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			}
		}

		/* TODO don't always clear all hi-level z tiles */
		if ((dev_priv->flags & RADEON_HAS_HIERZ)
		    && (dev_priv->microcode_version == UCODE_R200)
		    && (flags & RADEON_USE_HIERZ))
			/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
		{
			BEGIN_RING(4);
			OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
			OUT_RING(0x0);	/* First tile */
			OUT_RING(0x3cc0);
			OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
			ADVANCE_RING();
		}
	}

	/* We have to clear the depth and/or stencil buffers by
	 * rendering a quad into just those buffers. Thus, we have to
	 * make sure the 3D engine is configured correctly.
	 */
	else if ((dev_priv->microcode_version == UCODE_R200) &&
		 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {

		int tempPP_CNTL;
		int tempRE_CNTL;
		int tempRB3D_CNTL;
		int tempRB3D_ZSTENCILCNTL;
		int tempRB3D_STENCILREFMASK;
		int tempRB3D_PLANEMASK;
		int tempSE_CNTL;
		int tempSE_VTE_CNTL;
		int tempSE_VTX_FMT_0;
		int tempSE_VTX_FMT_1;
		int tempSE_VAP_CNTL;
		int tempRE_AUX_SCISSOR_CNTL;

		tempPP_CNTL = 0;
		tempRE_CNTL = 0;

		tempRB3D_CNTL = depth_clear->rb3d_cntl;

		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
		tempRB3D_STENCILREFMASK = 0x0;

		tempSE_CNTL = depth_clear->se_cntl;

		/* Disable TCL */

		tempSE_VAP_CNTL = (	/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
					  (0x9 <<
					   SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));

		/* Writes to the color planes are masked off entirely --
		 * only depth/stencil gets touched by the quad.
		 */
		tempRB3D_PLANEMASK = 0x0;

		tempRE_AUX_SCISSOR_CNTL = 0x0;

		tempSE_VTE_CNTL =
		    SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;

		/* Vertex format (X, Y, Z, W) */
		tempSE_VTX_FMT_0 =
		    SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
		    SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
		tempSE_VTX_FMT_1 = 0x0;

		/*
		 * Depth buffer specific enables
		 */
		if (flags & RADEON_DEPTH) {
			/* Enable depth buffer */
			tempRB3D_CNTL |= RADEON_Z_ENABLE;
		} else {
			/* Disable depth buffer */
			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
		}

		/*
		 * Stencil buffer specific enables
		 */
		if (flags & RADEON_STENCIL) {
			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = clear->depth_mask;
		} else {
			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING(26);
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
		OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
		OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
			     tempRB3D_STENCILREFMASK);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
		OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
		OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
		OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
		OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
		OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
		OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		sarea_priv->ctx_owner = 0;

		/* Draw one depth-only quad (as a 3-vertex rect list)
		 * per cliprect; w is fixed at 1.0f (0x3f800000).
		 */
		for (i = 0; i < nbox; i++) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			BEGIN_RING(14);
			OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			ADVANCE_RING();
		}
	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
		/* Pre-R200 path: same quad-rendering trick with the
		 * older register set.
		 */

		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;

		rb3d_cntl = depth_clear->rb3d_cntl;

		if (flags & RADEON_DEPTH) {
			rb3d_cntl |= RADEON_Z_ENABLE;
		} else {
			rb3d_cntl &= ~RADEON_Z_ENABLE;
		}

		if (flags & RADEON_STENCIL) {
			rb3d_cntl |= RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = clear->depth_mask;	/* misnamed field */
		} else {
			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING(13);
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
		OUT_RING(0x00000000);
		OUT_RING(rb3d_cntl);

		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
		OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		sarea_priv->ctx_owner = 0;

		for (i = 0; i < nbox; i++) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			BEGIN_RING(15);

			OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
			OUT_RING(RADEON_VTX_Z_PRESENT |
				 RADEON_VTX_PKCOLOR_PRESENT);
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  RADEON_MAOS_ENABLE |
				  RADEON_VTX_FMT_RADEON_MODE |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			ADVANCE_RING();
		}
	}

	/* Increment the clear counter. The client-side 3D driver must
	 * wait on this value before performing the clear ioctl. We
	 * need this because the card's so damned fast...
	 */
	sarea_priv->last_clear++;

	BEGIN_RING(4);

	RADEON_CLEAR_AGE(sarea_priv->last_clear);
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
}
1372
/* Swap buffers by blitting the back buffer to the front buffer for
 * every cliprect (the copy, non-page-flip swap path), then bump the
 * frame-age counter userspace throttles on.
 */
static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes)
		radeon_cp_performance_boxes(dev_priv, master_priv);

	/* Wait for the 3D stream to idle before dispatching the bitblt.
	 * This will prevent data corruption between the two streams.
	 */
	BEGIN_RING(2);

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	/* One screen-to-screen blit per cliprect */
	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);

		BEGIN_RING(9);

		OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
		if (sarea_priv->pfCurrentPage == 0) {
			/* source pitch/offset, then destination */
			OUT_RING(dev_priv->back_pitch_offset);
			OUT_RING(dev_priv->front_pitch_offset);
		} else {
			OUT_RING(dev_priv->front_pitch_offset);
			OUT_RING(dev_priv->back_pitch_offset);
		}

		OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
		OUT_RING((x << 16) | y);	/* source x,y */
		OUT_RING((x << 16) | y);	/* destination x,y */
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	sarea_priv->last_frame++;

	BEGIN_RING(4);

	RADEON_FRAME_AGE(sarea_priv->last_frame);
	RADEON_WAIT_UNTIL_2D_IDLE();

	ADVANCE_RING();
}
1450
/* Perform a page flip: repoint both CRTC scanout offsets at the other
 * buffer, toggle pfCurrentPage, and bump the frame-age counter that
 * userspace throttles on.
 */
void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
	/* Offset of the buffer we are flipping *to* */
	int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
	    ? dev_priv->front_offset : dev_priv->back_offset;
	RING_LOCALS;
	DRM_DEBUG("pfCurrentPage=%d\n",
		  master_priv->sarea_priv->pfCurrentPage);

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes) {
		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
		radeon_cp_performance_boxes(dev_priv, master_priv);
	}

	/* Update the frame offsets for both CRTCs
	 */
	BEGIN_RING(6);

	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING_REG(RADEON_CRTC_OFFSET,
		     ((sarea->frame.y * dev_priv->front_pitch +
		       sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
		     + offset);
	OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base
		     + offset);

	ADVANCE_RING();

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	master_priv->sarea_priv->last_frame++;
	master_priv->sarea_priv->pfCurrentPage =
	    1 - master_priv->sarea_priv->pfCurrentPage;

	BEGIN_RING(2);

	RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}
1497
1498static int bad_prim_vertex_nr(int primitive, int nr)
1499{
1500 switch (primitive & RADEON_PRIM_TYPE_MASK) {
1501 case RADEON_PRIM_TYPE_NONE:
1502 case RADEON_PRIM_TYPE_POINT:
1503 return nr < 1;
1504 case RADEON_PRIM_TYPE_LINE:
1505 return (nr & 1) || nr == 0;
1506 case RADEON_PRIM_TYPE_LINE_STRIP:
1507 return nr < 2;
1508 case RADEON_PRIM_TYPE_TRI_LIST:
1509 case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
1510 case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
1511 case RADEON_PRIM_TYPE_RECT_LIST:
1512 return nr % 3 || nr == 0;
1513 case RADEON_PRIM_TYPE_TRI_FAN:
1514 case RADEON_PRIM_TYPE_TRI_STRIP:
1515 return nr < 3;
1516 default:
1517 return 1;
1518 }
1519}
1520
/* Description of one TCL primitive to be dispatched from a vertex or
 * index buffer (see radeon_cp_dispatch_vertex/_indices below).
 */
typedef struct {
	unsigned int start;	/* byte offset of the first data in the buffer */
	unsigned int finish;	/* byte offset of the end of the data */
	unsigned int prim;	/* primitive type/flags (RADEON_PRIM_*) */
	unsigned int numverts;	/* number of vertices */
	unsigned int offset;	/* vertex array offset, used by the indexed path */
	unsigned int vc_format;	/* hardware vertex format word */
} drm_radeon_tcl_prim_t;
1529
/* Dispatch a primitive stored in a vertex buffer, re-emitting the
 * rendering command once per cliprect.  Rejects illegal
 * primitive-type / vertex-count combinations.
 */
static void radeon_cp_dispatch_vertex(struct drm_device * dev,
				      struct drm_file *file_priv,
				      struct drm_buf * buf,
				      drm_radeon_tcl_prim_t * prim)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	/* GART address of the vertex data */
	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
	int numverts = (int)prim->numverts;
	int nbox = sarea_priv->nbox;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
		  prim->prim,
		  prim->vc_format, prim->start, prim->finish, prim->numverts);

	if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
		DRM_ERROR("bad prim %x numverts %d\n",
			  prim->prim, prim->numverts);
		return;
	}

	/* The loop body runs at least once even with nbox == 0 */
	do {
		/* Emit the next cliprect */
		if (i < nbox) {
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
		}

		/* Emit the vertex buffer rendering commands */
		BEGIN_RING(5);

		OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
		OUT_RING(offset);
		OUT_RING(numverts);
		OUT_RING(prim->vc_format);
		OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
			 RADEON_COLOR_ORDER_RGBA |
			 RADEON_VTX_FMT_RADEON_MODE |
			 (numverts << RADEON_NUM_VERTICES_SHIFT));

		ADVANCE_RING();

		i++;
	} while (i < nbox);
}
1577
/* Mark a DMA buffer as pending and emit its dispatch age to the ring
 * so the freelist code can reclaim it once the CP has consumed it.
 * R600+ uses a different (3-dword) age packet.
 */
void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;

	/* Assign the buffer a new age before emitting it */
	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;

	/* Emit the vertex buffer age */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
		BEGIN_RING(3);
		R600_DISPATCH_AGE(buf_priv->age);
		ADVANCE_RING();
	} else {
		BEGIN_RING(2);
		RADEON_DISPATCH_AGE(buf_priv->age);
		ADVANCE_RING();
	}

	buf->pending = 1;
	buf->used = 0;
}
1601
/* Fire off the byte range [start, end) of a DMA buffer as an indirect
 * CP buffer.  The CP requires an even number of dwords, so odd-length
 * data is padded in place with a Type-2 NOP packet.  A zero-length
 * range is silently ignored.
 */
static void radeon_cp_dispatch_indirect(struct drm_device * dev,
					struct drm_buf * buf, int start, int end)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		/* GART address of the data to execute */
		int offset = (dev_priv->gart_buffers_offset
			      + buf->offset + start);
		/* byte count rounded up to whole dwords */
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CP packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = RADEON_CP_PACKET2;
		}

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}
}
1635
/* Dispatch an indexed primitive: the CP command header is patched
 * directly into the element buffer (ahead of the index data), then
 * the whole thing is fired as an indirect buffer once per cliprect.
 * Rejects illegal counts and misaligned/empty ranges.
 */
static void radeon_cp_dispatch_indices(struct drm_device *dev,
				       struct drm_master *master,
				       struct drm_buf * elt_buf,
				       drm_radeon_tcl_prim_t * prim)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	/* GART address of the vertex array the indices refer to */
	int offset = dev_priv->gart_buffers_offset + prim->offset;
	u32 *data;
	int dwords;
	int i = 0;
	/* index data begins after the command header */
	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
	int count = (prim->finish - start) / sizeof(u16);	/* u16 indices */
	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start, prim->finish, prim->offset, prim->numverts);

	if (bad_prim_vertex_nr(prim->prim, count)) {
		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
		return;
	}

	/* start must leave room for the header and be 8-byte aligned */
	if (start >= prim->finish || (prim->start & 0x7)) {
		DRM_ERROR("buffer prim %d\n", prim->prim);
		return;
	}

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	/* Patch the rendering command header into the buffer itself */
	data = (u32 *) ((char *)dev->agp_buffer_map->handle +
			elt_buf->offset + prim->start);

	data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
	data[1] = offset;
	data[2] = prim->numverts;
	data[3] = prim->vc_format;
	data[4] = (prim->prim |
		   RADEON_PRIM_WALK_IND |
		   RADEON_COLOR_ORDER_RGBA |
		   RADEON_VTX_FMT_RADEON_MODE |
		   (count << RADEON_NUM_VERTICES_SHIFT));

	/* Fire the buffer once per cliprect (at least once) */
	do {
		if (i < nbox)
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

		radeon_cp_dispatch_indirect(dev, elt_buf,
					    prim->start, prim->finish);

		i++;
	} while (i < nbox);

}
1693
#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE

/* Upload a texture image from user space into video memory using the
 * 2D blitter.
 *
 * Large images are split into multiple passes of at most
 * RADEON_MAX_TEXTURE_SIZE bytes; after each pass @image (y/height/data)
 * is advanced so that on -EAGAIN user space can retry with the updated
 * state.  Micro-tiled destinations with a width below the 64-byte blit
 * minimum are re-tiled manually on the CPU while copying.
 *
 * Returns 0 on success, -EINVAL on bad offsets/format, -EFAULT on a
 * failed user copy, -EAGAIN when no DMA buffer is available.
 */
static int radeon_cp_dispatch_texture(struct drm_device * dev,
				      struct drm_file *file_priv,
				      drm_radeon_texture_t * tex,
				      drm_radeon_tex_image_t * image)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_buf *buf;
	u32 format;
	u32 *buffer;
	const u8 __user *data;
	unsigned int size, dwords, tex_width, blit_width, spitch;
	u32 height;
	int i;
	u32 texpitch, microtile;
	u32 offset, byte_offset;
	RING_LOCALS;

	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
		DRM_ERROR("Invalid destination offset\n");
		return -EINVAL;
	}

	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;

	/* Flush the pixel cache. This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two. Thus, we'll
	 * use a shift instead.
	 */
	switch (tex->format) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		/* 4 bytes per texel */
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		/* 2 bytes per texel */
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		/* 1 byte per texel */
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR("invalid texture format %d\n", tex->format);
		return -EINVAL;
	}
	/* Source pitch in 64-byte units; zero is only valid for a
	 * single-row image. */
	spitch = blit_width >> 6;
	if (spitch == 0 && image->height > 1)
		return -EINVAL;

	texpitch = tex->pitch;
	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
		microtile = 1;
		if (tex_width < 64) {
			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
			/* we got tiled coordinates, untile them */
			image->x *= 2;
		}
	} else
		microtile = 0;

	/* this might fail for zero-sized uploads - are those illegal? */
	if (!radeon_check_offset(dev_priv, tex->offset + image->height *
				blit_width - 1)) {
		DRM_ERROR("Invalid final destination offset\n");
		return -EINVAL;
	}

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);

	/* One iteration per blit pass; @image is advanced at the bottom. */
	do {
		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
			  tex->offset >> 10, tex->pitch, tex->format,
			  image->x, image->y, image->width, image->height);

		/* Make a copy of some parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		height = image->height;
		data = (const u8 __user *)image->data;

		size = height * blit_width;

		if (size > RADEON_MAX_TEXTURE_SIZE) {
			/* Clamp this pass to as many full rows as fit. */
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if (size < 4 && size > 0) {
			size = 4;
		} else if (size == 0) {
			return 0;
		}

		buf = radeon_freelist_get(dev);
		/* NOTE(review): this idle-and-retry fallback is compiled
		 * out ("0 &&"); on freelist exhaustion we go straight to
		 * the -EAGAIN path below. */
		if (0 && !buf) {
			radeon_do_cp_idle(dev_priv);
			buf = radeon_freelist_get(dev);
		}
		if (!buf) {
			DRM_DEBUG("EAGAIN\n");
			/* Hand the updated image state back so user space
			 * can resume the upload where it stopped. */
			if (copy_to_user(tex->image, image, sizeof(*image)))
				return -EFAULT;
			return -EAGAIN;
		}

		/* Dispatch the indirect buffer.
		 */
		buffer =
		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
		dwords = size / 4;

/* Copy user data into the staging buffer, bailing out on fault. */
#define RADEON_COPY_MT(_buf, _data, _width) \
	do { \
		if (copy_from_user(_buf, _data, (_width))) {\
			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
			return -EFAULT; \
		} \
	} while(0)

		if (microtile) {
			/* texture micro tiling in use, minimum texture width is thus 16 bytes.
			   however, we cannot use blitter directly for texture width < 64 bytes,
			   since minimum tex pitch is 64 bytes and we need this to match
			   the texture width, otherwise the blitter will tile it wrong.
			   Thus, tiling manually in this case. Additionally, need to special
			   case tex height = 1, since our actual image will have height 2
			   and we need to ensure we don't read beyond the texture size
			   from user space. */
			if (tex->height == 1) {
				if (tex_width >= 64 || tex_width <= 16) {
					RADEON_COPY_MT(buffer, data,
						(int)(tex_width * sizeof(u32)));
				} else if (tex_width == 32) {
					RADEON_COPY_MT(buffer, data, 16);
					RADEON_COPY_MT(buffer + 8,
						       data + 16, 16);
				}
			} else if (tex_width >= 64 || tex_width == 16) {
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else if (tex_width < 16) {
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 4;
					data += tex_width;
				}
			} else if (tex_width == 32) {
				/* TODO: make sure this works when not fitting in one buffer
				   (i.e. 32bytes x 2048...) */
				for (i = 0; i < tex->height; i += 2) {
					RADEON_COPY_MT(buffer, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 8, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 4, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 12, data, 16);
					data += 16;
					buffer += 16;
				}
			}
		} else {
			if (tex_width >= 32) {
				/* Texture image width is larger than the minimum, so we
				 * can upload it directly.
				 */
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else {
				/* Texture image width is less than the minimum, so we
				 * need to pad out each image scanline to the minimum
				 * width.
				 */
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 8;
					data += tex_width;
				}
			}
		}

#undef RADEON_COPY_MT
		/* The blitter y coordinate is limited to 11 bits; fold
		 * the excess into a destination byte offset. */
		byte_offset = (image->y & ~2047) * blit_width;
		buf->file_priv = file_priv;
		buf->used = size;
		offset = dev_priv->gart_buffers_offset + buf->offset;
		BEGIN_RING(9);
		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (format << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
		OUT_RING((spitch << 22) | (offset >> 10));
		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
		OUT_RING(0);
		OUT_RING((image->x << 16) | (image->y % 2048));
		OUT_RING((image->width << 16) | height);
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		COMMIT_RING();

		radeon_cp_discard_buffer(dev, file_priv->master, buf);

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data = (const u8 __user *)image->data + size;
	} while (image->height > 0);

	/* Flush the pixel cache after the blit completes. This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();
	COMMIT_RING();

	return 0;
}
1938
1939static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
1940{
1941 drm_radeon_private_t *dev_priv = dev->dev_private;
1942 int i;
1943 RING_LOCALS;
1944 DRM_DEBUG("\n");
1945
1946 BEGIN_RING(35);
1947
1948 OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
1949 OUT_RING(0x00000000);
1950
1951 OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
1952 for (i = 0; i < 32; i++) {
1953 OUT_RING(stipple[i]);
1954 }
1955
1956 ADVANCE_RING();
1957}
1958
1959static void radeon_apply_surface_regs(int surf_index,
1960 drm_radeon_private_t *dev_priv)
1961{
1962 if (!dev_priv->mmio)
1963 return;
1964
1965 radeon_do_cp_idle(dev_priv);
1966
1967 RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
1968 dev_priv->surfaces[surf_index].flags);
1969 RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
1970 dev_priv->surfaces[surf_index].lower);
1971 RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
1972 dev_priv->surfaces[surf_index].upper);
1973}
1974
1975/* Allocates a virtual surface
1976 * doesn't always allocate a real surface, will stretch an existing
1977 * surface when possible.
1978 *
1979 * Note that refcount can be at most 2, since during a free refcount=3
1980 * might mean we have to allocate a new surface which might not always
1981 * be available.
1982 * For example : we allocate three contiguous surfaces ABC. If B is
1983 * freed, we suddenly need two surfaces to store A and C, which might
1984 * not always be available.
1985 */
/* Returns the index of the new virtual surface on success, or -1 when
 * the request is malformed, overlaps an existing surface, or no
 * virtual/hardware surface slot is free.
 */
static int alloc_surface(drm_radeon_surface_alloc_t *new,
			 drm_radeon_private_t *dev_priv,
			 struct drm_file *file_priv)
{
	struct radeon_virt_surface *s;
	int i;
	int virt_surface_index;
	uint32_t new_upper, new_lower;

	new_lower = new->address;
	new_upper = new_lower + new->size - 1;

	/* sanity check */
	/* Bounds must be non-empty and aligned: upper must end on the
	 * fixed-mask boundary, lower must start on one. */
	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
	     RADEON_SURF_ADDRESS_FIXED_MASK)
	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
		return -1;

	/* make sure there is no overlap with existing surfaces */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if ((dev_priv->surfaces[i].refcount != 0) &&
		    (((new_lower >= dev_priv->surfaces[i].lower) &&
		      (new_lower < dev_priv->surfaces[i].upper)) ||
		     ((new_lower < dev_priv->surfaces[i].lower) &&
		      (new_upper > dev_priv->surfaces[i].lower)))) {
			return -1;
		}
	}

	/* find a virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
		if (dev_priv->virt_surfaces[i].file_priv == NULL)
			break;
	if (i == 2 * RADEON_MAX_SURFACES) {
		return -1;
	}
	virt_surface_index = i;

	/* try to reuse an existing surface */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		/* extend before */
		/* New range ends exactly where surface i begins: grow
		 * surface i downward instead of burning a new slot. */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].lower = s->lower;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}

		/* extend after */
		/* New range starts right past surface i: grow it upward. */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].upper = s->upper;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* okay, we need a new one */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if (dev_priv->surfaces[i].refcount == 0) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount = 1;
			dev_priv->surfaces[i].lower = s->lower;
			dev_priv->surfaces[i].upper = s->upper;
			dev_priv->surfaces[i].flags = s->flags;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* we didn't find anything */
	return -1;
}
2081
/* Free the virtual surface owned by @file_priv that starts at @lower.
 *
 * If the virtual surface formed an edge of its backing hardware
 * surface, that surface's bounds are shrunk accordingly; when the
 * refcount reaches zero the hardware surface is disabled (flags = 0).
 * Returns 0 on success, 1 when no matching virtual surface exists.
 */
static int free_surface(struct drm_file *file_priv,
			drm_radeon_private_t * dev_priv,
			int lower)
{
	struct radeon_virt_surface *s;
	int i;
	/* find the virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		s = &(dev_priv->virt_surfaces[i]);
		if (s->file_priv) {
			if ((lower == s->lower) && (file_priv == s->file_priv))
			{
				/* Shrink the real surface from whichever
				 * edge this virtual range occupied. */
				if (dev_priv->surfaces[s->surface_index].
				    lower == s->lower)
					dev_priv->surfaces[s->surface_index].
					    lower = s->upper;

				if (dev_priv->surfaces[s->surface_index].
				    upper == s->upper)
					dev_priv->surfaces[s->surface_index].
					    upper = s->lower;

				dev_priv->surfaces[s->surface_index].refcount--;
				if (dev_priv->surfaces[s->surface_index].
				    refcount == 0)
					dev_priv->surfaces[s->surface_index].
					    flags = 0;
				s->file_priv = NULL;
				radeon_apply_surface_regs(s->surface_index,
							  dev_priv);
				return 0;
			}
		}
	}
	return 1;
}
2118
2119static void radeon_surfaces_release(struct drm_file *file_priv,
2120 drm_radeon_private_t * dev_priv)
2121{
2122 int i;
2123 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
2124 if (dev_priv->virt_surfaces[i].file_priv == file_priv)
2125 free_surface(file_priv, dev_priv,
2126 dev_priv->virt_surfaces[i].lower);
2127 }
2128}
2129
2130/* ================================================================
2131 * IOCTL functions
2132 */
2133static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
2134{
2135 drm_radeon_private_t *dev_priv = dev->dev_private;
2136 drm_radeon_surface_alloc_t *alloc = data;
2137
2138 if (alloc_surface(alloc, dev_priv, file_priv) == -1)
2139 return -EINVAL;
2140 else
2141 return 0;
2142}
2143
2144static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
2145{
2146 drm_radeon_private_t *dev_priv = dev->dev_private;
2147 drm_radeon_surface_free_t *memfree = data;
2148
2149 if (free_surface(file_priv, dev_priv, memfree->address))
2150 return -EINVAL;
2151 else
2152 return 0;
2153}
2154
/* DRM_RADEON_CLEAR ioctl: clear color/depth buffers over the SAREA
 * cliprects.  Copies the per-cliprect depth clear rectangles from user
 * space, clamps nbox, and hands off to radeon_cp_dispatch_clear().
 */
static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	drm_radeon_clear_t *clear = data;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* Clamp untrusted SAREA value before using it as a copy count. */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if (copy_from_user(&depth_boxes, clear->depth_boxes,
			       sarea_priv->nbox * sizeof(depth_boxes[0])))
		return -EFAULT;

	radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);

	COMMIT_RING();
	return 0;
}
2180
/* Enable page flipping on both CRTCs by setting OFFSET_FLIP_CNTL, and
 * initialise the SAREA's current-page tracking.  Called lazily from the
 * flip ioctl rather than at init time (historical: "not sure why this
 * isn't set all the time").
 */
static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	RING_LOCALS;

	DRM_DEBUG("\n");

	/* Read-modify-write both CRTC offset control registers through
	 * the ring so the change is ordered with rendering. */
	BEGIN_RING(6);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	ADVANCE_RING();

	dev_priv->page_flipping = 1;

	/* Normalise any stale value to a valid page index (0 or 1). */
	if (master_priv->sarea_priv->pfCurrentPage != 1)
		master_priv->sarea_priv->pfCurrentPage = 0;

	return 0;
}
2208
/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
/* DRM_RADEON_FLIP ioctl: perform a full-screen page flip, enabling
 * page flipping on first use.
 */
static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* Lazy one-time setup of the CRTC flip control bits. */
	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip(dev, file_priv->master);

	radeon_cp_dispatch_flip(dev, file_priv->master);

	COMMIT_RING();
	return 0;
}
2229
/* DRM_RADEON_SWAP ioctl: blit the back buffer to the front within the
 * SAREA cliprects, routing to the R600 path on newer chips.  Clears
 * ctx_owner so the next rendering client re-emits state.
 */
static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* Clamp untrusted SAREA cliprect count. */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_cp_dispatch_swap(dev, file_priv);
	else
		radeon_cp_dispatch_swap(dev, file_priv->master);
	sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}
2254
/* DRM_RADEON_VERTEX ioctl: dispatch a client-owned DMA buffer as a
 * single non-indexed primitive.  Validates buffer index, primitive
 * type, ownership and pending state; flushes dirty SAREA state first;
 * optionally discards the buffer afterwards.
 */
static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex_t *vertex = data;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	sarea_priv = master_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);

	/* Untrusted buffer index from user space. */
	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	/* Only the owner may dispatch a buffer, and only once. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	/* Build up a prim_t record:
	 */
	if (vertex->count) {
		buf->used = vertex->count;	/* not used? */

		/* Re-emit any dirty context/texture state before the
		 * primitive so it renders with current settings. */
		if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
			if (radeon_emit_state(dev_priv, file_priv,
					      &sarea_priv->context_state,
					      sarea_priv->tex_state,
					      sarea_priv->dirty)) {
				DRM_ERROR("radeon_emit_state failed\n");
				return -EINVAL;
			}

			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
					       RADEON_UPLOAD_TEX1IMAGES |
					       RADEON_UPLOAD_TEX2IMAGES |
					       RADEON_REQUIRE_QUIESCENCE);
		}

		prim.start = 0;
		prim.finish = vertex->count;	/* unused */
		prim.prim = vertex->prim;
		prim.numverts = vertex->count;
		prim.vc_format = sarea_priv->vc_format;

		radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
	}

	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}
2333
/* DRM_RADEON_INDICES ioctl: dispatch a client-owned DMA buffer holding
 * index data as an indexed primitive.  Adjusts the start offset back by
 * RADEON_INDEX_PRIM_OFFSET so dispatch_indices() can build the packet
 * header in front of the indices, then validates alignment and range.
 */
static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_indices_t *elts = data;
	drm_radeon_tcl_prim_t prim;
	int count;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	sarea_priv = master_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
		  DRM_CURRENTPID, elts->idx, elts->start, elts->end,
		  elts->discard);

	/* Untrusted buffer index from user space. */
	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", elts->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts->idx];

	/* Only the owner may dispatch a buffer, and only once. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts->idx);
		return -EINVAL;
	}

	/* count is measured over the raw index range; start is then
	 * rewound to leave room for the generated packet header. */
	count = (elts->end - elts->start) / sizeof(u16);
	elts->start -= RADEON_INDEX_PRIM_OFFSET;

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
		return -EINVAL;
	}
	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
		return -EINVAL;
	}

	buf->used = elts->end;

	/* Re-emit any dirty context/texture state first. */
	if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
		if (radeon_emit_state(dev_priv, file_priv,
				      &sarea_priv->context_state,
				      sarea_priv->tex_state,
				      sarea_priv->dirty)) {
			DRM_ERROR("radeon_emit_state failed\n");
			return -EINVAL;
		}

		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
				       RADEON_UPLOAD_TEX1IMAGES |
				       RADEON_UPLOAD_TEX2IMAGES |
				       RADEON_REQUIRE_QUIESCENCE);
	}

	/* Build up a prim_t record:
	 */
	prim.start = elts->start;
	prim.finish = elts->end;
	prim.prim = elts->prim;
	prim.offset = 0;	/* offset from start of dma buffers */
	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
	prim.vc_format = sarea_priv->vc_format;

	radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
	if (elts->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}
2424
2425static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
2426{
2427 drm_radeon_private_t *dev_priv = dev->dev_private;
2428 drm_radeon_texture_t *tex = data;
2429 drm_radeon_tex_image_t image;
2430 int ret;
2431
2432 LOCK_TEST_WITH_RETURN(dev, file_priv);
2433
2434 if (tex->image == NULL) {
2435 DRM_ERROR("null texture image!\n");
2436 return -EINVAL;
2437 }
2438
2439 if (copy_from_user(&image,
2440 (drm_radeon_tex_image_t __user *) tex->image,
2441 sizeof(image)))
2442 return -EFAULT;
2443
2444 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2445 VB_AGE_TEST_WITH_RETURN(dev_priv);
2446
2447 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
2448 ret = r600_cp_dispatch_texture(dev, file_priv, tex, &image);
2449 else
2450 ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
2451
2452 return ret;
2453}
2454
2455static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
2456{
2457 drm_radeon_private_t *dev_priv = dev->dev_private;
2458 drm_radeon_stipple_t *stipple = data;
2459 u32 mask[32];
2460
2461 LOCK_TEST_WITH_RETURN(dev, file_priv);
2462
2463 if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
2464 return -EFAULT;
2465
2466 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2467
2468 radeon_cp_dispatch_stipple(dev, mask);
2469
2470 COMMIT_RING();
2471 return 0;
2472}
2473
/* DRM_RADEON_INDIRECT ioctl: execute a raw command range from a
 * client-owned DMA buffer.  The commands are not verified here (see the
 * comment below) — the ioctl is restricted to privileged clients by the
 * DRM ioctl flags.
 */
static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_indirect_t *indirect = data;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	/* Untrusted buffer index from user space. */
	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];

	/* Only the owner may dispatch a buffer, and only once. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	/* New range must begin at or after the already-consumed part. */
	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
	else {
		/* Wait for the 3D stream to idle before the indirect buffer
		 * containing 2D acceleration commands is processed.
		 */
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
		radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
	}

	if (indirect->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}
2540
/* DRM_RADEON_VERTEX2 ioctl: dispatch several primitives from one DMA
 * buffer, each with its own (user-supplied) state block.  State is only
 * re-emitted when stateidx changes between consecutive prims.
 */
static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex2_t *vertex = data;
	int i;
	/* Last emitted stateidx; 0xff = "none yet". */
	unsigned char laststate;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	sarea_priv = master_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->discard);

	/* Untrusted buffer index from user space. */
	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	/* Only the owner may dispatch a buffer, and only once. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}

	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		return -EINVAL;

	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
		drm_radeon_prim_t prim;
		drm_radeon_tcl_prim_t tclprim;

		if (copy_from_user(&prim, &vertex->prim[i], sizeof(prim)))
			return -EFAULT;

		/* Emit a new state block only when it changed. */
		if (prim.stateidx != laststate) {
			drm_radeon_state_t state;

			if (copy_from_user(&state,
					       &vertex->state[prim.stateidx],
					       sizeof(state)))
				return -EFAULT;

			if (radeon_emit_state2(dev_priv, file_priv, &state)) {
				DRM_ERROR("radeon_emit_state2 failed\n");
				return -EINVAL;
			}

			laststate = prim.stateidx;
		}

		tclprim.start = prim.start;
		tclprim.finish = prim.finish;
		tclprim.prim = prim.prim;
		tclprim.vc_format = prim.vc_format;

		/* Indexed prims keep their indices past the vertex data. */
		if (prim.prim & RADEON_PRIM_WALK_IND) {
			tclprim.offset = prim.numverts * 64;
			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */

			radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);
		} else {
			tclprim.numverts = prim.numverts;
			tclprim.offset = 0;	/* not used */

			radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);
		}

		if (sarea_priv->nbox == 1)
			sarea_priv->nbox = 0;
	}

	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}
2635
/* Emit one pre-defined register packet from the command buffer.
 *
 * The packet id indexes the static packet[] table for the register
 * start/length; the payload is validated and fixed up (offsets) before
 * being copied to the ring.  Returns 0 or -EINVAL.
 */
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
			       struct drm_file *file_priv,
			       drm_radeon_cmd_header_t header,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int id = (int)header.packet.packet_id;
	int sz, reg;
	RING_LOCALS;

	if (id >= RADEON_MAX_STATE_PACKETS)
		return -EINVAL;

	sz = packet[id].len;
	reg = packet[id].start;

	/* Never read past the data user space actually supplied. */
	if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
		DRM_ERROR("Packet size provided larger than data provided\n");
		return -EINVAL;
	}

	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
	    cmdbuf->buffer)) {
		DRM_ERROR("Packet verification failed\n");
		return -EINVAL;
	}

	BEGIN_RING(sz + 1);
	OUT_RING(CP_PACKET0(reg, (sz - 1)));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}
2669
2670static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2671 drm_radeon_cmd_header_t header,
2672 drm_radeon_kcmd_buffer_t *cmdbuf)
2673{
2674 int sz = header.scalars.count;
2675 int start = header.scalars.offset;
2676 int stride = header.scalars.stride;
2677 RING_LOCALS;
2678
2679 BEGIN_RING(3 + sz);
2680 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2681 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2682 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2683 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2684 ADVANCE_RING();
2685 return 0;
2686}
2687
2688/* God this is ugly
2689 */
2690static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2691 drm_radeon_cmd_header_t header,
2692 drm_radeon_kcmd_buffer_t *cmdbuf)
2693{
2694 int sz = header.scalars.count;
2695 int start = ((unsigned int)header.scalars.offset) + 0x100;
2696 int stride = header.scalars.stride;
2697 RING_LOCALS;
2698
2699 BEGIN_RING(3 + sz);
2700 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2701 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2702 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2703 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2704 ADVANCE_RING();
2705 return 0;
2706}
2707
2708static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2709 drm_radeon_cmd_header_t header,
2710 drm_radeon_kcmd_buffer_t *cmdbuf)
2711{
2712 int sz = header.vectors.count;
2713 int start = header.vectors.offset;
2714 int stride = header.vectors.stride;
2715 RING_LOCALS;
2716
2717 BEGIN_RING(5 + sz);
2718 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
2719 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2720 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2721 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2722 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2723 ADVANCE_RING();
2724
2725 return 0;
2726}
2727
/* Emit a linear run of TCL vector state.  Unlike radeon_emit_vectors()
 * the stride is fixed at one octword and the start slot is encoded in
 * the addr_lo/addr_hi fields of the header.
 *
 * Returns 0 on success (including the empty-payload case) and -EINVAL
 * when the command buffer does not hold the advertised payload.
 */
static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	/* count is in 4-dword vectors, so sz ends up in dwords */
	int sz = header.veclinear.count * 4;
	int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
	RING_LOCALS;

	if (!sz)
		return 0;
	/* sz dwords -> sz * 4 bytes; refuse to read past the end of the
	 * buffer copied from user space */
	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;

	BEGIN_RING(5 + sz);
	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
	OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}
2751
2752static int radeon_emit_packet3(struct drm_device * dev,
2753 struct drm_file *file_priv,
2754 drm_radeon_kcmd_buffer_t *cmdbuf)
2755{
2756 drm_radeon_private_t *dev_priv = dev->dev_private;
2757 unsigned int cmdsz;
2758 int ret;
2759 RING_LOCALS;
2760
2761 DRM_DEBUG("\n");
2762
2763 if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
2764 cmdbuf, &cmdsz))) {
2765 DRM_ERROR("Packet verification failed\n");
2766 return ret;
2767 }
2768
2769 BEGIN_RING(cmdsz);
2770 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2771 ADVANCE_RING();
2772
2773 return 0;
2774}
2775
/* Verify a type-3 packet and emit it once per client-supplied cliprect,
 * programming the scissor before each emission.
 *
 * @orig_nbox: the cliprect count as originally submitted; if it was
 *             zero the packet is skipped entirely (but still consumed
 *             from the buffer via the 'out' path).
 *
 * Returns 0 on success, -EINVAL on verification failure, -EFAULT if a
 * cliprect cannot be copied from user space.
 */
static int radeon_emit_packet3_cliprect(struct drm_device *dev,
					struct drm_file *file_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					int orig_nbox)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	unsigned int cmdsz;
	int ret;
	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	if (!orig_nbox)
		goto out;

	/* Emit the packet once per cliprect; the do/while guarantees at
	 * least one emission even if nbox has already been cleared. */
	do {
		if (i < cmdbuf->nbox) {
			if (copy_from_user(&box, &boxes[i], sizeof(box)))
				return -EFAULT;
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect(). This fixes a
			 * lockup on fast machines when sending
			 * several cliprects with a cmdbuf, as when
			 * waving a 2D window over a 3D
			 * window. Something in the commands from user
			 * space seems to hang the card when they're
			 * sent several times in a row. That would be
			 * the correct place to fix it but this works
			 * around it until I can figure that out - Tim
			 * Smith */
			if (i) {
				BEGIN_RING(2);
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect(dev_priv, &box);
		}

		BEGIN_RING(cmdsz);
		OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
		ADVANCE_RING();

	} while (++i < cmdbuf->nbox);
	/* Single cliprect: mark it consumed so subsequent packets in the
	 * same cmdbuf are emitted exactly once. */
	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

	return 0;
      out:
	/* Nothing emitted, but the packet must still be consumed so the
	 * parser stays in sync with the buffer. */
	drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
	return 0;
}
2837
2838static int radeon_emit_wait(struct drm_device * dev, int flags)
2839{
2840 drm_radeon_private_t *dev_priv = dev->dev_private;
2841 RING_LOCALS;
2842
2843 DRM_DEBUG("%x\n", flags);
2844 switch (flags) {
2845 case RADEON_WAIT_2D:
2846 BEGIN_RING(2);
2847 RADEON_WAIT_UNTIL_2D_IDLE();
2848 ADVANCE_RING();
2849 break;
2850 case RADEON_WAIT_3D:
2851 BEGIN_RING(2);
2852 RADEON_WAIT_UNTIL_3D_IDLE();
2853 ADVANCE_RING();
2854 break;
2855 case RADEON_WAIT_2D | RADEON_WAIT_3D:
2856 BEGIN_RING(2);
2857 RADEON_WAIT_UNTIL_IDLE();
2858 ADVANCE_RING();
2859 break;
2860 default:
2861 return -EINVAL;
2862 }
2863
2864 return 0;
2865}
2866
/* RADEON_CMDBUF ioctl handler: copy the user command stream into a
 * kernel buffer, then walk it one command header at a time and emit
 * each command class to the ring.  R300-family chips are handed off to
 * r300_do_cp_cmdbuf() instead.
 *
 * Returns 0 on success, -EINVAL for oversized buffers or malformed
 * commands, or the error from the buffer allocation/copy helpers.
 */
static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	drm_radeon_cmd_header_t stack_header;
	int idx;
	drm_radeon_kcmd_buffer_t *cmdbuf = data;
	int orig_nbox;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	/* Cap user submissions at 64 KiB and reject negative sizes. */
	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
		return -EINVAL;
	}

	/* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
	 * races between checking values and using those values in other code,
	 * and simply to avoid a lot of function calls to copy in data.
	 */
	if (cmdbuf->bufsz != 0) {
		int rv;
		void __user *buffer = cmdbuf->buffer;
		rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
		if (rv)
			return rv;
		rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
						cmdbuf->bufsz);
		if (rv) {
			drm_buffer_free(cmdbuf->buffer);
			return rv;
		}
	} else
		goto done;

	/* Remember the submitted cliprect count: the cliprect path needs
	 * the original value even after cmdbuf->nbox is modified. */
	orig_nbox = cmdbuf->nbox;

	if (dev_priv->microcode_version == UCODE_R300) {
		int temp;
		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);

		drm_buffer_free(cmdbuf->buffer);

		return temp;
	}

	/* microcode_version != r300 */
	while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {

		/* Read the next command header (copied to stack_header if
		 * it straddles an internal buffer page). */
		drm_radeon_cmd_header_t *header;
		header = drm_buffer_read_object(cmdbuf->buffer,
				sizeof(stack_header), &stack_header);

		switch (header->header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets
			    (dev_priv, file_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_packets failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header->dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				goto err;
			}

			/* Only the owning client may discard a buffer that
			 * is not already pending. */
			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				goto err;
			}

			radeon_cp_discard_buffer(dev, file_priv->master, buf);
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect
			    (dev, file_priv, cmdbuf, orig_nbox)) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait(dev, header->wait.flags)) {
				DRM_ERROR("radeon_emit_wait failed\n");
				goto err;
			}
			break;
		case RADEON_CMD_VECLINEAR:
			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
			if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_veclinear failed\n");
				goto err;
			}
			break;

		default:
			DRM_ERROR("bad cmd_type %d at byte %d\n",
				  header->header.cmd_type,
				  cmdbuf->buffer->iterator);
			goto err;
		}
	}

	drm_buffer_free(cmdbuf->buffer);

      done:
	DRM_DEBUG("DONE\n");
	COMMIT_RING();
	return 0;

      err:
	drm_buffer_free(cmdbuf->buffer);
	return -EINVAL;
}
3029
/* RADEON_GETPARAM ioctl: report a single int-sized driver value to
 * user space, copied out through param->value.
 *
 * Returns 0 on success, -EINVAL for unknown parameters (or a scratch
 * offset query without working writeback), -EFAULT on copy failure.
 */
static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t *param = data;
	int value;

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param->param) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		/* Also bumps the read statistics used for throttling. */
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH(dev_priv, 0);
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH(dev_priv, 1);
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH(dev_priv, 2);
		break;
	case RADEON_PARAM_IRQ_NR:
		/* R600+ parts don't expose an IRQ through this path. */
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			value = 0;
		else
			value = dev->pdev->irq;
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio->offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#if BITS_PER_LONG == 32
	/*
	 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
	 * pointer which can't fit into an int-sized variable. According to
	 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
	 * not supporting it shouldn't be a problem. If the same functionality
	 * is needed on 64-bit platforms, a new ioctl() would have to be added,
	 * so backwards-compatibility for the embedded platforms can be
	 * maintained. --davidm 4-Feb-2004.
	 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		/* no users of this parameter */
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;
	case RADEON_PARAM_SCRATCH_OFFSET:
		if (!dev_priv->writeback_works)
			return -EINVAL;
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			value = R600_SCRATCH_REG_OFFSET;
		else
			value = RADEON_SCRATCH_REG_OFFSET;
		break;
	case RADEON_PARAM_CARD_TYPE:
		if (dev_priv->flags & RADEON_IS_PCIE)
			value = RADEON_CARD_PCIE;
		else if (dev_priv->flags & RADEON_IS_AGP)
			value = RADEON_CARD_AGP;
		else
			value = RADEON_CARD_PCI;
		break;
	case RADEON_PARAM_VBLANK_CRTC:
		value = radeon_vblank_crtc_get(dev);
		break;
	case RADEON_PARAM_FB_LOCATION:
		value = radeon_read_fb_location(dev_priv);
		break;
	case RADEON_PARAM_NUM_GB_PIPES:
		value = dev_priv->num_gb_pipes;
		break;
	case RADEON_PARAM_NUM_Z_PIPES:
		value = dev_priv->num_z_pipes;
		break;
	default:
		DRM_DEBUG("Invalid parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
3126
3127static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
3128{
3129 drm_radeon_private_t *dev_priv = dev->dev_private;
3130 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
3131 drm_radeon_setparam_t *sp = data;
3132 struct drm_radeon_driver_file_fields *radeon_priv;
3133
3134 switch (sp->param) {
3135 case RADEON_SETPARAM_FB_LOCATION:
3136 radeon_priv = file_priv->driver_priv;
3137 radeon_priv->radeon_fb_delta = dev_priv->fb_location -
3138 sp->value;
3139 break;
3140 case RADEON_SETPARAM_SWITCH_TILING:
3141 if (sp->value == 0) {
3142 DRM_DEBUG("color tiling disabled\n");
3143 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3144 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3145 if (master_priv->sarea_priv)
3146 master_priv->sarea_priv->tiling_enabled = 0;
3147 } else if (sp->value == 1) {
3148 DRM_DEBUG("color tiling enabled\n");
3149 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
3150 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
3151 if (master_priv->sarea_priv)
3152 master_priv->sarea_priv->tiling_enabled = 1;
3153 }
3154 break;
3155 case RADEON_SETPARAM_PCIGART_LOCATION:
3156 dev_priv->pcigart_offset = sp->value;
3157 dev_priv->pcigart_offset_set = 1;
3158 break;
3159 case RADEON_SETPARAM_NEW_MEMMAP:
3160 dev_priv->new_memmap = sp->value;
3161 break;
3162 case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
3163 dev_priv->gart_info.table_size = sp->value;
3164 if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
3165 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
3166 break;
3167 case RADEON_SETPARAM_VBLANK_CRTC:
3168 return radeon_vblank_crtc_set(dev, sp->value);
3169 break;
3170 default:
3171 DRM_DEBUG("Invalid parameter %d\n", sp->param);
3172 return -EINVAL;
3173 }
3174
3175 return 0;
3176}
3177
3178/* When a client dies:
3179 * - Check for and clean up flipped page state
3180 * - Free any alloced GART memory.
3181 * - Free any alloced radeon surfaces.
3182 *
3183 * DRM infrastructure takes care of reclaiming dma buffers.
3184 */
3185void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
3186{
3187 if (dev->dev_private) {
3188 drm_radeon_private_t *dev_priv = dev->dev_private;
3189 dev_priv->page_flipping = 0;
3190 radeon_mem_release(file_priv, dev_priv->gart_heap);
3191 radeon_mem_release(file_priv, dev_priv->fb_heap);
3192 radeon_surfaces_release(file_priv, dev_priv);
3193 }
3194}
3195
/* Called when the last file handle on the device is closed: release
 * the surfaces still owned by the PCI GART pseudo-client and tear the
 * CP down. */
void radeon_driver_lastclose(struct drm_device *dev)
{
	radeon_surfaces_release(PCIGART_FILE_PRIV, dev->dev_private);
	radeon_do_release(dev);
}
3201
3202int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
3203{
3204 drm_radeon_private_t *dev_priv = dev->dev_private;
3205 struct drm_radeon_driver_file_fields *radeon_priv;
3206
3207 DRM_DEBUG("\n");
3208 radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL);
3209
3210 if (!radeon_priv)
3211 return -ENOMEM;
3212
3213 file_priv->driver_priv = radeon_priv;
3214
3215 if (dev_priv)
3216 radeon_priv->radeon_fb_delta = dev_priv->fb_location;
3217 else
3218 radeon_priv->radeon_fb_delta = 0;
3219 return 0;
3220}
3221
/* Per-open-file teardown: free the bookkeeping allocated in
 * radeon_driver_open(). */
void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_radeon_driver_file_fields *radeon_priv =
	    file_priv->driver_priv;

	kfree(radeon_priv);
}
3229
/* Ioctl dispatch table for the legacy (UMS) radeon interface.  Setup
 * and heap-management ioctls are restricted to the authenticated
 * root/master; the rest only require DRM authentication. */
struct drm_ioctl_desc radeon_ioctls[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
};

/* Number of entries in radeon_ioctls[], consumed by the DRM core. */
int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 574f62bbd215..7eb1ae758906 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -361,31 +361,31 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
361 361
362 /* stitch together an VCE create msg */ 362 /* stitch together an VCE create msg */
363 ib.length_dw = 0; 363 ib.length_dw = 0;
364 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 364 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
365 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 365 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
366 ib.ptr[ib.length_dw++] = handle; 366 ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
367 367
368 ib.ptr[ib.length_dw++] = 0x00000030; /* len */ 368 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
369 ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ 369 ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
370 ib.ptr[ib.length_dw++] = 0x00000000; 370 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
371 ib.ptr[ib.length_dw++] = 0x00000042; 371 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
372 ib.ptr[ib.length_dw++] = 0x0000000a; 372 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
373 ib.ptr[ib.length_dw++] = 0x00000001; 373 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
374 ib.ptr[ib.length_dw++] = 0x00000080; 374 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
375 ib.ptr[ib.length_dw++] = 0x00000060; 375 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
376 ib.ptr[ib.length_dw++] = 0x00000100; 376 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
377 ib.ptr[ib.length_dw++] = 0x00000100; 377 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
378 ib.ptr[ib.length_dw++] = 0x0000000c; 378 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
379 ib.ptr[ib.length_dw++] = 0x00000000; 379 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
380 380
381 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 381 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
382 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 382 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
383 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 383 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
384 ib.ptr[ib.length_dw++] = dummy; 384 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
385 ib.ptr[ib.length_dw++] = 0x00000001; 385 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
386 386
387 for (i = ib.length_dw; i < ib_size_dw; ++i) 387 for (i = ib.length_dw; i < ib_size_dw; ++i)
388 ib.ptr[i] = 0x0; 388 ib.ptr[i] = cpu_to_le32(0x0);
389 389
390 r = radeon_ib_schedule(rdev, &ib, NULL, false); 390 r = radeon_ib_schedule(rdev, &ib, NULL, false);
391 if (r) { 391 if (r) {
@@ -428,21 +428,21 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
428 428
429 /* stitch together an VCE destroy msg */ 429 /* stitch together an VCE destroy msg */
430 ib.length_dw = 0; 430 ib.length_dw = 0;
431 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 431 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
432 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 432 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
433 ib.ptr[ib.length_dw++] = handle; 433 ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
434 434
435 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 435 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
436 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 436 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
437 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 437 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
438 ib.ptr[ib.length_dw++] = dummy; 438 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
439 ib.ptr[ib.length_dw++] = 0x00000001; 439 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
440 440
441 ib.ptr[ib.length_dw++] = 0x00000008; /* len */ 441 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
442 ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ 442 ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */
443 443
444 for (i = ib.length_dw; i < ib_size_dw; ++i) 444 for (i = ib.length_dw; i < ib_size_dw; ++i)
445 ib.ptr[i] = 0x0; 445 ib.ptr[i] = cpu_to_le32(0x0);
446 446
447 r = radeon_ib_schedule(rdev, &ib, NULL, false); 447 r = radeon_ib_schedule(rdev, &ib, NULL, false);
448 if (r) { 448 if (r) {
@@ -699,12 +699,12 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
699{ 699{
700 uint64_t addr = semaphore->gpu_addr; 700 uint64_t addr = semaphore->gpu_addr;
701 701
702 radeon_ring_write(ring, VCE_CMD_SEMAPHORE); 702 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
703 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); 703 radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
704 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); 704 radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
705 radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); 705 radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
706 if (!emit_wait) 706 if (!emit_wait)
707 radeon_ring_write(ring, VCE_CMD_END); 707 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
708 708
709 return true; 709 return true;
710} 710}
@@ -719,10 +719,10 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
719void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 719void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
720{ 720{
721 struct radeon_ring *ring = &rdev->ring[ib->ring]; 721 struct radeon_ring *ring = &rdev->ring[ib->ring];
722 radeon_ring_write(ring, VCE_CMD_IB); 722 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
723 radeon_ring_write(ring, ib->gpu_addr); 723 radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
724 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); 724 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
725 radeon_ring_write(ring, ib->length_dw); 725 radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
726} 726}
727 727
728/** 728/**
@@ -738,12 +738,12 @@ void radeon_vce_fence_emit(struct radeon_device *rdev,
738 struct radeon_ring *ring = &rdev->ring[fence->ring]; 738 struct radeon_ring *ring = &rdev->ring[fence->ring];
739 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; 739 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
740 740
741 radeon_ring_write(ring, VCE_CMD_FENCE); 741 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
742 radeon_ring_write(ring, addr); 742 radeon_ring_write(ring, cpu_to_le32(addr));
743 radeon_ring_write(ring, upper_32_bits(addr)); 743 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
744 radeon_ring_write(ring, fence->seq); 744 radeon_ring_write(ring, cpu_to_le32(fence->seq));
745 radeon_ring_write(ring, VCE_CMD_TRAP); 745 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
746 radeon_ring_write(ring, VCE_CMD_END); 746 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
747} 747}
748 748
749/** 749/**
@@ -765,7 +765,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
765 ring->idx, r); 765 ring->idx, r);
766 return r; 766 return r;
767 } 767 }
768 radeon_ring_write(ring, VCE_CMD_END); 768 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
769 radeon_ring_unlock_commit(rdev, ring, false); 769 radeon_ring_unlock_commit(rdev, ring, false);
770 770
771 for (i = 0; i < rdev->usec_timeout; i++) { 771 for (i = 0; i < rdev->usec_timeout; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 48d97c040f49..3979632b9225 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
455 455
456 if (soffset) { 456 if (soffset) {
457 /* make sure object fit at this offset */ 457 /* make sure object fit at this offset */
458 eoffset = soffset + size; 458 eoffset = soffset + size - 1;
459 if (soffset >= eoffset) { 459 if (soffset >= eoffset) {
460 r = -EINVAL; 460 r = -EINVAL;
461 goto error_unreserve; 461 goto error_unreserve;
462 } 462 }
463 463
464 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; 464 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
465 if (last_pfn > rdev->vm_manager.max_pfn) { 465 if (last_pfn >= rdev->vm_manager.max_pfn) {
466 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", 466 dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
467 last_pfn, rdev->vm_manager.max_pfn); 467 last_pfn, rdev->vm_manager.max_pfn);
468 r = -EINVAL; 468 r = -EINVAL;
469 goto error_unreserve; 469 goto error_unreserve;
@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
478 eoffset /= RADEON_GPU_PAGE_SIZE; 478 eoffset /= RADEON_GPU_PAGE_SIZE;
479 if (soffset || eoffset) { 479 if (soffset || eoffset) {
480 struct interval_tree_node *it; 480 struct interval_tree_node *it;
481 it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); 481 it = interval_tree_iter_first(&vm->va, soffset, eoffset);
482 if (it && it != &bo_va->it) { 482 if (it && it != &bo_va->it) {
483 struct radeon_bo_va *tmp; 483 struct radeon_bo_va *tmp;
484 tmp = container_of(it, struct radeon_bo_va, it); 484 tmp = container_of(it, struct radeon_bo_va, it);
@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
518 if (soffset || eoffset) { 518 if (soffset || eoffset) {
519 spin_lock(&vm->status_lock); 519 spin_lock(&vm->status_lock);
520 bo_va->it.start = soffset; 520 bo_va->it.start = soffset;
521 bo_va->it.last = eoffset - 1; 521 bo_va->it.last = eoffset;
522 list_add(&bo_va->vm_status, &vm->cleared); 522 list_add(&bo_va->vm_status, &vm->cleared);
523 spin_unlock(&vm->status_lock); 523 spin_unlock(&vm->status_lock);
524 interval_tree_insert(&bo_va->it, &vm->va); 524 interval_tree_insert(&bo_va->it, &vm->va);
@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
888 unsigned i; 888 unsigned i;
889 889
890 start >>= radeon_vm_block_size; 890 start >>= radeon_vm_block_size;
891 end >>= radeon_vm_block_size; 891 end = (end - 1) >> radeon_vm_block_size;
892 892
893 for (i = start; i <= end; ++i) 893 for (i = start; i <= end; ++i)
894 radeon_bo_fence(vm->page_tables[i].bo, fence, true); 894 radeon_bo_fence(vm->page_tables[i].bo, fence, true);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 4c4a7218a3bd..d1a7b58dd291 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -915,6 +915,11 @@
915#define DCCG_AUDIO_DTO1_PHASE 0x05c0 915#define DCCG_AUDIO_DTO1_PHASE 0x05c0
916#define DCCG_AUDIO_DTO1_MODULE 0x05c4 916#define DCCG_AUDIO_DTO1_MODULE 0x05c4
917 917
918#define DENTIST_DISPCLK_CNTL 0x0490
919# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
920# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
921# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
922
918#define AFMT_AUDIO_SRC_CONTROL 0x713c 923#define AFMT_AUDIO_SRC_CONTROL 0x713c
919#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) 924#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
920/* AFMT_AUDIO_SRC_SELECT 925/* AFMT_AUDIO_SRC_SELECT
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 48cb19949ca3..88a4b706be16 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -613,7 +613,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
613 613
614 ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, 614 ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
615 &rgrp->planes[index % 2].plane, 615 &rgrp->planes[index % 2].plane,
616 NULL, &crtc_funcs); 616 NULL, &crtc_funcs, NULL);
617 if (ret < 0) 617 if (ret < 0)
618 return ret; 618 return ret;
619 619
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index d0ae1e8009c6..c08700757feb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -173,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
173 goto done; 173 goto done;
174 } else { 174 } else {
175 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, 175 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
176 encoder_type); 176 encoder_type, NULL);
177 if (ret < 0) 177 if (ret < 0)
178 goto done; 178 goto done;
179 179
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 96f2eb43713c..a37b6e2fe51a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -28,7 +28,7 @@ static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
28{ 28{
29 struct rcar_du_connector *con = to_rcar_connector(connector); 29 struct rcar_du_connector *con = to_rcar_connector(connector);
30 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); 30 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
31 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); 31 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
32 32
33 if (sfuncs->get_modes == NULL) 33 if (sfuncs->get_modes == NULL)
34 return 0; 34 return 0;
@@ -41,7 +41,7 @@ static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
41{ 41{
42 struct rcar_du_connector *con = to_rcar_connector(connector); 42 struct rcar_du_connector *con = to_rcar_connector(connector);
43 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); 43 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
44 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); 44 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
45 45
46 if (sfuncs->mode_valid == NULL) 46 if (sfuncs->mode_valid == NULL)
47 return MODE_OK; 47 return MODE_OK;
@@ -66,7 +66,7 @@ rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
66{ 66{
67 struct rcar_du_connector *con = to_rcar_connector(connector); 67 struct rcar_du_connector *con = to_rcar_connector(connector);
68 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); 68 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
69 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); 69 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
70 70
71 if (sfuncs->detect == NULL) 71 if (sfuncs->detect == NULL)
72 return connector_status_unknown; 72 return connector_status_unknown;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 81da8419282b..2567efcbee36 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -35,7 +35,7 @@ struct rcar_du_hdmienc {
35static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) 35static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
36{ 36{
37 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 37 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
38 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); 38 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
39 39
40 if (sfuncs->dpms) 40 if (sfuncs->dpms)
41 sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF); 41 sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -50,7 +50,7 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
50static void rcar_du_hdmienc_enable(struct drm_encoder *encoder) 50static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
51{ 51{
52 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 52 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
53 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); 53 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
54 54
55 if (hdmienc->renc->lvds) 55 if (hdmienc->renc->lvds)
56 rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, 56 rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
@@ -67,7 +67,7 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
67 struct drm_connector_state *conn_state) 67 struct drm_connector_state *conn_state)
68{ 68{
69 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 69 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
70 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); 70 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
71 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 71 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
72 const struct drm_display_mode *mode = &crtc_state->mode; 72 const struct drm_display_mode *mode = &crtc_state->mode;
73 73
@@ -89,7 +89,7 @@ static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
89 struct drm_display_mode *adjusted_mode) 89 struct drm_display_mode *adjusted_mode)
90{ 90{
91 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 91 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
92 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); 92 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
93 93
94 if (sfuncs->mode_set) 94 if (sfuncs->mode_set)
95 sfuncs->mode_set(encoder, mode, adjusted_mode); 95 sfuncs->mode_set(encoder, mode, adjusted_mode);
@@ -151,7 +151,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
151 goto error; 151 goto error;
152 152
153 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, 153 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
154 DRM_MODE_ENCODER_TMDS); 154 DRM_MODE_ENCODER_TMDS, NULL);
155 if (ret < 0) 155 if (ret < 0)
156 goto error; 156 goto error;
157 157
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index ffa583712cd9..c3ed9522c0e1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -410,7 +410,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
410 410
411 ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, 411 ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
412 &rcar_du_plane_funcs, formats, 412 &rcar_du_plane_funcs, formats,
413 ARRAY_SIZE(formats), type); 413 ARRAY_SIZE(formats), type,
414 NULL);
414 if (ret < 0) 415 if (ret < 0)
415 return ret; 416 return ret;
416 417
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 35215f6867d3..85739859dffc 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -25,3 +25,13 @@ config ROCKCHIP_DW_HDMI
25 for the Synopsys DesignWare HDMI driver. If you want to 25 for the Synopsys DesignWare HDMI driver. If you want to
26 enable HDMI on RK3288 based SoC, you should selet this 26 enable HDMI on RK3288 based SoC, you should selet this
27 option. 27 option.
28
29config ROCKCHIP_DW_MIPI_DSI
30 tristate "Rockchip specific extensions for Synopsys DW MIPI DSI"
31 depends on DRM_ROCKCHIP
32 select DRM_MIPI_DSI
33 help
34 This selects support for Rockchip SoC specific extensions
35 for the Synopsys DesignWare HDMI driver. If you want to
36 enable MIPI DSI on RK3288 based SoC, you should selet this
37 option.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index f3d8a19c641f..d1dc0f7b01db 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -6,5 +6,7 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
6 rockchip_drm_gem.o 6 rockchip_drm_gem.o
7 7
8obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o 8obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
9obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
9 10
10obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o 11obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o \
12 rockchip_vop_reg.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
new file mode 100644
index 000000000000..7bfe243c6173
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -0,0 +1,1194 @@
1/*
2 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9#include <linux/clk.h>
10#include <linux/component.h>
11#include <linux/iopoll.h>
12#include <linux/math64.h>
13#include <linux/module.h>
14#include <linux/of_device.h>
15#include <linux/regmap.h>
16#include <linux/mfd/syscon.h>
17#include <drm/drm_atomic_helper.h>
18#include <drm/drm_crtc.h>
19#include <drm/drm_crtc_helper.h>
20#include <drm/drm_mipi_dsi.h>
21#include <drm/drm_of.h>
22#include <drm/drm_panel.h>
23#include <drm/drmP.h>
24#include <video/mipi_display.h>
25
26#include "rockchip_drm_drv.h"
27#include "rockchip_drm_vop.h"
28
29#define DRIVER_NAME "dw-mipi-dsi"
30
31#define GRF_SOC_CON6 0x025c
32#define DSI0_SEL_VOP_LIT (1 << 6)
33#define DSI1_SEL_VOP_LIT (1 << 9)
34
35#define DSI_VERSION 0x00
36#define DSI_PWR_UP 0x04
37#define RESET 0
38#define POWERUP BIT(0)
39
40#define DSI_CLKMGR_CFG 0x08
41#define TO_CLK_DIVIDSION(div) (((div) & 0xff) << 8)
42#define TX_ESC_CLK_DIVIDSION(div) (((div) & 0xff) << 0)
43
44#define DSI_DPI_VCID 0x0c
45#define DPI_VID(vid) (((vid) & 0x3) << 0)
46
47#define DSI_DPI_COLOR_CODING 0x10
48#define EN18_LOOSELY BIT(8)
49#define DPI_COLOR_CODING_16BIT_1 0x0
50#define DPI_COLOR_CODING_16BIT_2 0x1
51#define DPI_COLOR_CODING_16BIT_3 0x2
52#define DPI_COLOR_CODING_18BIT_1 0x3
53#define DPI_COLOR_CODING_18BIT_2 0x4
54#define DPI_COLOR_CODING_24BIT 0x5
55
56#define DSI_DPI_CFG_POL 0x14
57#define COLORM_ACTIVE_LOW BIT(4)
58#define SHUTD_ACTIVE_LOW BIT(3)
59#define HSYNC_ACTIVE_LOW BIT(2)
60#define VSYNC_ACTIVE_LOW BIT(1)
61#define DATAEN_ACTIVE_LOW BIT(0)
62
63#define DSI_DPI_LP_CMD_TIM 0x18
64#define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16)
65#define INVACT_LPCMD_TIME(p) ((p) & 0xff)
66
67#define DSI_DBI_CFG 0x20
68#define DSI_DBI_CMDSIZE 0x28
69
70#define DSI_PCKHDL_CFG 0x2c
71#define EN_CRC_RX BIT(4)
72#define EN_ECC_RX BIT(3)
73#define EN_BTA BIT(2)
74#define EN_EOTP_RX BIT(1)
75#define EN_EOTP_TX BIT(0)
76
77#define DSI_MODE_CFG 0x34
78#define ENABLE_VIDEO_MODE 0
79#define ENABLE_CMD_MODE BIT(0)
80
81#define DSI_VID_MODE_CFG 0x38
82#define FRAME_BTA_ACK BIT(14)
83#define ENABLE_LOW_POWER (0x3f << 8)
84#define ENABLE_LOW_POWER_MASK (0x3f << 8)
85#define VID_MODE_TYPE_BURST_SYNC_PULSES 0x2
86#define VID_MODE_TYPE_MASK 0x3
87
88#define DSI_VID_PKT_SIZE 0x3c
89#define VID_PKT_SIZE(p) (((p) & 0x3fff) << 0)
90#define VID_PKT_MAX_SIZE 0x3fff
91
92#define DSI_VID_HSA_TIME 0x48
93#define DSI_VID_HBP_TIME 0x4c
94#define DSI_VID_HLINE_TIME 0x50
95#define DSI_VID_VSA_LINES 0x54
96#define DSI_VID_VBP_LINES 0x58
97#define DSI_VID_VFP_LINES 0x5c
98#define DSI_VID_VACTIVE_LINES 0x60
99#define DSI_CMD_MODE_CFG 0x68
100#define MAX_RD_PKT_SIZE_LP BIT(24)
101#define DCS_LW_TX_LP BIT(19)
102#define DCS_SR_0P_TX_LP BIT(18)
103#define DCS_SW_1P_TX_LP BIT(17)
104#define DCS_SW_0P_TX_LP BIT(16)
105#define GEN_LW_TX_LP BIT(14)
106#define GEN_SR_2P_TX_LP BIT(13)
107#define GEN_SR_1P_TX_LP BIT(12)
108#define GEN_SR_0P_TX_LP BIT(11)
109#define GEN_SW_2P_TX_LP BIT(10)
110#define GEN_SW_1P_TX_LP BIT(9)
111#define GEN_SW_0P_TX_LP BIT(8)
112#define EN_ACK_RQST BIT(1)
113#define EN_TEAR_FX BIT(0)
114
115#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
116 DCS_LW_TX_LP | \
117 DCS_SR_0P_TX_LP | \
118 DCS_SW_1P_TX_LP | \
119 DCS_SW_0P_TX_LP | \
120 GEN_LW_TX_LP | \
121 GEN_SR_2P_TX_LP | \
122 GEN_SR_1P_TX_LP | \
123 GEN_SR_0P_TX_LP | \
124 GEN_SW_2P_TX_LP | \
125 GEN_SW_1P_TX_LP | \
126 GEN_SW_0P_TX_LP)
127
128#define DSI_GEN_HDR 0x6c
129#define GEN_HDATA(data) (((data) & 0xffff) << 8)
130#define GEN_HDATA_MASK (0xffff << 8)
131#define GEN_HTYPE(type) (((type) & 0xff) << 0)
132#define GEN_HTYPE_MASK 0xff
133
134#define DSI_GEN_PLD_DATA 0x70
135
136#define DSI_CMD_PKT_STATUS 0x74
137#define GEN_CMD_EMPTY BIT(0)
138#define GEN_CMD_FULL BIT(1)
139#define GEN_PLD_W_EMPTY BIT(2)
140#define GEN_PLD_W_FULL BIT(3)
141#define GEN_PLD_R_EMPTY BIT(4)
142#define GEN_PLD_R_FULL BIT(5)
143#define GEN_RD_CMD_BUSY BIT(6)
144
145#define DSI_TO_CNT_CFG 0x78
146#define HSTX_TO_CNT(p) (((p) & 0xffff) << 16)
147#define LPRX_TO_CNT(p) ((p) & 0xffff)
148
149#define DSI_BTA_TO_CNT 0x8c
150
151#define DSI_LPCLK_CTRL 0x94
152#define AUTO_CLKLANE_CTRL BIT(1)
153#define PHY_TXREQUESTCLKHS BIT(0)
154
155#define DSI_PHY_TMR_LPCLK_CFG 0x98
156#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
157#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
158
159#define DSI_PHY_TMR_CFG 0x9c
160#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
161#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
162#define MAX_RD_TIME(lbcc) ((lbcc) & 0x7fff)
163
164#define DSI_PHY_RSTZ 0xa0
165#define PHY_DISFORCEPLL 0
166#define PHY_ENFORCEPLL BIT(3)
167#define PHY_DISABLECLK 0
168#define PHY_ENABLECLK BIT(2)
169#define PHY_RSTZ 0
170#define PHY_UNRSTZ BIT(1)
171#define PHY_SHUTDOWNZ 0
172#define PHY_UNSHUTDOWNZ BIT(0)
173
174#define DSI_PHY_IF_CFG 0xa4
175#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
176#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
177
178#define DSI_PHY_STATUS 0xb0
179#define LOCK BIT(0)
180#define STOP_STATE_CLK_LANE BIT(2)
181
182#define DSI_PHY_TST_CTRL0 0xb4
183#define PHY_TESTCLK BIT(1)
184#define PHY_UNTESTCLK 0
185#define PHY_TESTCLR BIT(0)
186#define PHY_UNTESTCLR 0
187
188#define DSI_PHY_TST_CTRL1 0xb8
189#define PHY_TESTEN BIT(16)
190#define PHY_UNTESTEN 0
191#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
192#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
193
194#define DSI_INT_ST0 0xbc
195#define DSI_INT_ST1 0xc0
196#define DSI_INT_MSK0 0xc4
197#define DSI_INT_MSK1 0xc8
198
199#define PHY_STATUS_TIMEOUT_US 10000
200#define CMD_PKT_STATUS_TIMEOUT_US 20000
201
202#define BYPASS_VCO_RANGE BIT(7)
203#define VCO_RANGE_CON_SEL(val) (((val) & 0x7) << 3)
204#define VCO_IN_CAP_CON_DEFAULT (0x0 << 1)
205#define VCO_IN_CAP_CON_LOW (0x1 << 1)
206#define VCO_IN_CAP_CON_HIGH (0x2 << 1)
207#define REF_BIAS_CUR_SEL BIT(0)
208
209#define CP_CURRENT_3MA BIT(3)
210#define CP_PROGRAM_EN BIT(7)
211#define LPF_PROGRAM_EN BIT(6)
212#define LPF_RESISTORS_20_KOHM 0
213
214#define HSFREQRANGE_SEL(val) (((val) & 0x3f) << 1)
215
216#define INPUT_DIVIDER(val) ((val - 1) & 0x7f)
217#define LOW_PROGRAM_EN 0
218#define HIGH_PROGRAM_EN BIT(7)
219#define LOOP_DIV_LOW_SEL(val) ((val - 1) & 0x1f)
220#define LOOP_DIV_HIGH_SEL(val) (((val - 1) >> 5) & 0x1f)
221#define PLL_LOOP_DIV_EN BIT(5)
222#define PLL_INPUT_DIV_EN BIT(4)
223
224#define POWER_CONTROL BIT(6)
225#define INTERNAL_REG_CURRENT BIT(3)
226#define BIAS_BLOCK_ON BIT(2)
227#define BANDGAP_ON BIT(0)
228
229#define TER_RESISTOR_HIGH BIT(7)
230#define TER_RESISTOR_LOW 0
231#define LEVEL_SHIFTERS_ON BIT(6)
232#define TER_CAL_DONE BIT(5)
233#define SETRD_MAX (0x7 << 2)
234#define POWER_MANAGE BIT(1)
235#define TER_RESISTORS_ON BIT(0)
236
237#define BIASEXTR_SEL(val) ((val) & 0x7)
238#define BANDGAP_SEL(val) ((val) & 0x7)
239#define TLP_PROGRAM_EN BIT(7)
240#define THS_PRE_PROGRAM_EN BIT(7)
241#define THS_ZERO_PROGRAM_EN BIT(6)
242
243enum {
244 BANDGAP_97_07,
245 BANDGAP_98_05,
246 BANDGAP_99_02,
247 BANDGAP_100_00,
248 BANDGAP_93_17,
249 BANDGAP_94_15,
250 BANDGAP_95_12,
251 BANDGAP_96_10,
252};
253
254enum {
255 BIASEXTR_87_1,
256 BIASEXTR_91_5,
257 BIASEXTR_95_9,
258 BIASEXTR_100,
259 BIASEXTR_105_94,
260 BIASEXTR_111_88,
261 BIASEXTR_118_8,
262 BIASEXTR_127_7,
263};
264
265struct dw_mipi_dsi_plat_data {
266 unsigned int max_data_lanes;
267 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
268 struct drm_display_mode *mode);
269};
270
271struct dw_mipi_dsi {
272 struct drm_encoder encoder;
273 struct drm_connector connector;
274 struct mipi_dsi_host dsi_host;
275 struct drm_panel *panel;
276 struct device *dev;
277 struct regmap *grf_regmap;
278 void __iomem *base;
279
280 struct clk *pllref_clk;
281 struct clk *pclk;
282
283 unsigned int lane_mbps; /* per lane */
284 u32 channel;
285 u32 lanes;
286 u32 format;
287 u16 input_div;
288 u16 feedback_div;
289 struct drm_display_mode *mode;
290
291 const struct dw_mipi_dsi_plat_data *pdata;
292};
293
294enum dw_mipi_dsi_mode {
295 DW_MIPI_DSI_CMD_MODE,
296 DW_MIPI_DSI_VID_MODE,
297};
298
299struct dphy_pll_testdin_map {
300 unsigned int max_mbps;
301 u8 testdin;
302};
303
304/* The table is based on 27MHz DPHY pll reference clock. */
305static const struct dphy_pll_testdin_map dptdin_map[] = {
306 { 90, 0x00}, { 100, 0x10}, { 110, 0x20}, { 130, 0x01},
307 { 140, 0x11}, { 150, 0x21}, { 170, 0x02}, { 180, 0x12},
308 { 200, 0x22}, { 220, 0x03}, { 240, 0x13}, { 250, 0x23},
309 { 270, 0x04}, { 300, 0x14}, { 330, 0x05}, { 360, 0x15},
310 { 400, 0x25}, { 450, 0x06}, { 500, 0x16}, { 550, 0x07},
311 { 600, 0x17}, { 650, 0x08}, { 700, 0x18}, { 750, 0x09},
312 { 800, 0x19}, { 850, 0x29}, { 900, 0x39}, { 950, 0x0a},
313 {1000, 0x1a}, {1050, 0x2a}, {1100, 0x3a}, {1150, 0x0b},
314 {1200, 0x1b}, {1250, 0x2b}, {1300, 0x3b}, {1350, 0x0c},
315 {1400, 0x1c}, {1450, 0x2c}, {1500, 0x3c}
316};
317
318static int max_mbps_to_testdin(unsigned int max_mbps)
319{
320 int i;
321
322 for (i = 0; i < ARRAY_SIZE(dptdin_map); i++)
323 if (dptdin_map[i].max_mbps > max_mbps)
324 return dptdin_map[i].testdin;
325
326 return -EINVAL;
327}
328
329/*
330 * The controller should generate 2 frames before
331 * preparing the peripheral.
332 */
333static void dw_mipi_dsi_wait_for_two_frames(struct dw_mipi_dsi *dsi)
334{
335 int refresh, two_frames;
336
337 refresh = drm_mode_vrefresh(dsi->mode);
338 two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;
339 msleep(two_frames);
340}
341
342static inline struct dw_mipi_dsi *host_to_dsi(struct mipi_dsi_host *host)
343{
344 return container_of(host, struct dw_mipi_dsi, dsi_host);
345}
346
347static inline struct dw_mipi_dsi *con_to_dsi(struct drm_connector *con)
348{
349 return container_of(con, struct dw_mipi_dsi, connector);
350}
351
352static inline struct dw_mipi_dsi *encoder_to_dsi(struct drm_encoder *encoder)
353{
354 return container_of(encoder, struct dw_mipi_dsi, encoder);
355}
356static inline void dsi_write(struct dw_mipi_dsi *dsi, u32 reg, u32 val)
357{
358 writel(val, dsi->base + reg);
359}
360
361static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg)
362{
363 return readl(dsi->base + reg);
364}
365
366static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi *dsi, u8 test_code,
367 u8 test_data)
368{
369 /*
370 * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
371 * is latched internally as the current test code. Test data is
372 * programmed internally by rising edge on TESTCLK.
373 */
374 dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
375
376 dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
377 PHY_TESTDIN(test_code));
378
379 dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
380
381 dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
382 PHY_TESTDIN(test_data));
383
384 dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
385}
386
387static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
388{
389 int ret, testdin, vco, val;
390
391 vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
392
393 testdin = max_mbps_to_testdin(dsi->lane_mbps);
394 if (testdin < 0) {
395 dev_err(dsi->dev,
396 "failed to get testdin for %dmbps lane clock\n",
397 dsi->lane_mbps);
398 return testdin;
399 }
400
401 dsi_write(dsi, DSI_PWR_UP, POWERUP);
402
403 dw_mipi_dsi_phy_write(dsi, 0x10, BYPASS_VCO_RANGE |
404 VCO_RANGE_CON_SEL(vco) |
405 VCO_IN_CAP_CON_LOW |
406 REF_BIAS_CUR_SEL);
407
408 dw_mipi_dsi_phy_write(dsi, 0x11, CP_CURRENT_3MA);
409 dw_mipi_dsi_phy_write(dsi, 0x12, CP_PROGRAM_EN | LPF_PROGRAM_EN |
410 LPF_RESISTORS_20_KOHM);
411
412 dw_mipi_dsi_phy_write(dsi, 0x44, HSFREQRANGE_SEL(testdin));
413
414 dw_mipi_dsi_phy_write(dsi, 0x19, PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
415 dw_mipi_dsi_phy_write(dsi, 0x17, INPUT_DIVIDER(dsi->input_div));
416 dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_LOW_SEL(dsi->feedback_div) |
417 LOW_PROGRAM_EN);
418 dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
419 HIGH_PROGRAM_EN);
420
421 dw_mipi_dsi_phy_write(dsi, 0x20, POWER_CONTROL | INTERNAL_REG_CURRENT |
422 BIAS_BLOCK_ON | BANDGAP_ON);
423
424 dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_LOW | TER_CAL_DONE |
425 SETRD_MAX | TER_RESISTORS_ON);
426 dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
427 SETRD_MAX | POWER_MANAGE |
428 TER_RESISTORS_ON);
429
430 dw_mipi_dsi_phy_write(dsi, 0x22, LOW_PROGRAM_EN |
431 BIASEXTR_SEL(BIASEXTR_127_7));
432 dw_mipi_dsi_phy_write(dsi, 0x22, HIGH_PROGRAM_EN |
433 BANDGAP_SEL(BANDGAP_96_10));
434
435 dw_mipi_dsi_phy_write(dsi, 0x70, TLP_PROGRAM_EN | 0xf);
436 dw_mipi_dsi_phy_write(dsi, 0x71, THS_PRE_PROGRAM_EN | 0x55);
437 dw_mipi_dsi_phy_write(dsi, 0x72, THS_ZERO_PROGRAM_EN | 0xa);
438
439 dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
440 PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
441
442
443 ret = readx_poll_timeout(readl, dsi->base + DSI_PHY_STATUS,
444 val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
445 if (ret < 0) {
446 dev_err(dsi->dev, "failed to wait for phy lock state\n");
447 return ret;
448 }
449
450 ret = readx_poll_timeout(readl, dsi->base + DSI_PHY_STATUS,
451 val, val & STOP_STATE_CLK_LANE, 1000,
452 PHY_STATUS_TIMEOUT_US);
453 if (ret < 0) {
454 dev_err(dsi->dev,
455 "failed to wait for phy clk lane stop state\n");
456 return ret;
457 }
458
459 return ret;
460}
461
462static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi)
463{
464 unsigned int bpp, i, pre;
465 unsigned long mpclk, pllref, tmp;
466 unsigned int m = 1, n = 1, target_mbps = 1000;
467 unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
468
469 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
470 if (bpp < 0) {
471 dev_err(dsi->dev, "failed to get bpp for pixel format %d\n",
472 dsi->format);
473 return bpp;
474 }
475
476 mpclk = DIV_ROUND_UP(dsi->mode->clock, MSEC_PER_SEC);
477 if (mpclk) {
478 /* take 1 / 0.9, since mbps must big than bandwidth of RGB */
479 tmp = mpclk * (bpp / dsi->lanes) * 10 / 9;
480 if (tmp < max_mbps)
481 target_mbps = tmp;
482 else
483 dev_err(dsi->dev, "DPHY clock frequency is out of range\n");
484 }
485
486 pllref = DIV_ROUND_UP(clk_get_rate(dsi->pllref_clk), USEC_PER_SEC);
487 tmp = pllref;
488
489 for (i = 1; i < 6; i++) {
490 pre = pllref / i;
491 if ((tmp > (target_mbps % pre)) && (target_mbps / pre < 512)) {
492 tmp = target_mbps % pre;
493 n = i;
494 m = target_mbps / pre;
495 }
496 if (tmp == 0)
497 break;
498 }
499
500 dsi->lane_mbps = pllref / n * m;
501 dsi->input_div = n;
502 dsi->feedback_div = m;
503
504 return 0;
505}
506
507static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
508 struct mipi_dsi_device *device)
509{
510 struct dw_mipi_dsi *dsi = host_to_dsi(host);
511
512 if (device->lanes > dsi->pdata->max_data_lanes) {
513 dev_err(dsi->dev, "the number of data lanes(%u) is too many\n",
514 device->lanes);
515 return -EINVAL;
516 }
517
518 if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) ||
519 !(device->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
520 dev_err(dsi->dev, "device mode is unsupported\n");
521 return -EINVAL;
522 }
523
524 dsi->lanes = device->lanes;
525 dsi->channel = device->channel;
526 dsi->format = device->format;
527 dsi->panel = of_drm_find_panel(device->dev.of_node);
528 if (dsi->panel)
529 return drm_panel_attach(dsi->panel, &dsi->connector);
530
531 return -EINVAL;
532}
533
534static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
535 struct mipi_dsi_device *device)
536{
537 struct dw_mipi_dsi *dsi = host_to_dsi(host);
538
539 drm_panel_detach(dsi->panel);
540
541 return 0;
542}
543
544static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 val)
545{
546 int ret;
547
548 ret = readx_poll_timeout(readl, dsi->base + DSI_CMD_PKT_STATUS,
549 val, !(val & GEN_CMD_FULL), 1000,
550 CMD_PKT_STATUS_TIMEOUT_US);
551 if (ret < 0) {
552 dev_err(dsi->dev, "failed to get available command FIFO\n");
553 return ret;
554 }
555
556 dsi_write(dsi, DSI_GEN_HDR, val);
557
558 ret = readx_poll_timeout(readl, dsi->base + DSI_CMD_PKT_STATUS,
559 val, val & (GEN_CMD_EMPTY | GEN_PLD_W_EMPTY),
560 1000, CMD_PKT_STATUS_TIMEOUT_US);
561 if (ret < 0) {
562 dev_err(dsi->dev, "failed to write command FIFO\n");
563 return ret;
564 }
565
566 return 0;
567}
568
569static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
570 const struct mipi_dsi_msg *msg)
571{
572 const u16 *tx_buf = msg->tx_buf;
573 u32 val = GEN_HDATA(*tx_buf) | GEN_HTYPE(msg->type);
574
575 if (msg->tx_len > 2) {
576 dev_err(dsi->dev, "too long tx buf length %zu for short write\n",
577 msg->tx_len);
578 return -EINVAL;
579 }
580
581 return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
582}
583
584static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
585 const struct mipi_dsi_msg *msg)
586{
587 const u32 *tx_buf = msg->tx_buf;
588 int len = msg->tx_len, pld_data_bytes = sizeof(*tx_buf), ret;
589 u32 val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
590 u32 remainder = 0;
591
592 if (msg->tx_len < 3) {
593 dev_err(dsi->dev, "wrong tx buf length %zu for long write\n",
594 msg->tx_len);
595 return -EINVAL;
596 }
597
598 while (DIV_ROUND_UP(len, pld_data_bytes)) {
599 if (len < pld_data_bytes) {
600 memcpy(&remainder, tx_buf, len);
601 dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
602 len = 0;
603 } else {
604 dsi_write(dsi, DSI_GEN_PLD_DATA, *tx_buf);
605 tx_buf++;
606 len -= pld_data_bytes;
607 }
608
609 ret = readx_poll_timeout(readl, dsi->base + DSI_CMD_PKT_STATUS,
610 val, !(val & GEN_PLD_W_FULL), 1000,
611 CMD_PKT_STATUS_TIMEOUT_US);
612 if (ret < 0) {
613 dev_err(dsi->dev,
614 "failed to get available write payload FIFO\n");
615 return ret;
616 }
617 }
618
619 return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
620}
621
622static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
623 const struct mipi_dsi_msg *msg)
624{
625 struct dw_mipi_dsi *dsi = host_to_dsi(host);
626 int ret;
627
628 switch (msg->type) {
629 case MIPI_DSI_DCS_SHORT_WRITE:
630 case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
631 case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
632 ret = dw_mipi_dsi_dcs_short_write(dsi, msg);
633 break;
634 case MIPI_DSI_DCS_LONG_WRITE:
635 ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
636 break;
637 default:
638 dev_err(dsi->dev, "unsupported message type\n");
639 ret = -EINVAL;
640 }
641
642 return ret;
643}
644
645static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
646 .attach = dw_mipi_dsi_host_attach,
647 .detach = dw_mipi_dsi_host_detach,
648 .transfer = dw_mipi_dsi_host_transfer,
649};
650
651static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
652{
653 u32 val;
654
655 val = VID_MODE_TYPE_BURST_SYNC_PULSES | ENABLE_LOW_POWER;
656
657 dsi_write(dsi, DSI_VID_MODE_CFG, val);
658}
659
660static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
661 enum dw_mipi_dsi_mode mode)
662{
663 if (mode == DW_MIPI_DSI_CMD_MODE) {
664 dsi_write(dsi, DSI_PWR_UP, RESET);
665 dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
666 dsi_write(dsi, DSI_PWR_UP, POWERUP);
667 } else {
668 dsi_write(dsi, DSI_PWR_UP, RESET);
669 dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
670 dw_mipi_dsi_video_mode_config(dsi);
671 dsi_write(dsi, DSI_PWR_UP, POWERUP);
672 }
673}
674
675static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
676{
677 dsi_write(dsi, DSI_PWR_UP, RESET);
678 dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ);
679}
680
681static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
682{
683 dsi_write(dsi, DSI_PWR_UP, RESET);
684 dsi_write(dsi, DSI_PHY_RSTZ, PHY_DISFORCEPLL | PHY_DISABLECLK
685 | PHY_RSTZ | PHY_SHUTDOWNZ);
686 dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVIDSION(10) |
687 TX_ESC_CLK_DIVIDSION(7));
688 dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
689}
690
691static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
692 struct drm_display_mode *mode)
693{
694 u32 val = 0, color = 0;
695
696 switch (dsi->format) {
697 case MIPI_DSI_FMT_RGB888:
698 color = DPI_COLOR_CODING_24BIT;
699 break;
700 case MIPI_DSI_FMT_RGB666:
701 color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
702 break;
703 case MIPI_DSI_FMT_RGB666_PACKED:
704 color = DPI_COLOR_CODING_18BIT_1;
705 break;
706 case MIPI_DSI_FMT_RGB565:
707 color = DPI_COLOR_CODING_16BIT_1;
708 break;
709 }
710
711 if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
712 val |= VSYNC_ACTIVE_LOW;
713 if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
714 val |= HSYNC_ACTIVE_LOW;
715
716 dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
717 dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
718 dsi_write(dsi, DSI_DPI_CFG_POL, val);
719 dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
720 | INVACT_LPCMD_TIME(4));
721}
722
723static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
724{
725 dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
726}
727
728static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
729 struct drm_display_mode *mode)
730{
731 dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
732}
733
734static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
735{
736 dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
737 dsi_write(dsi, DSI_BTA_TO_CNT, 0xd00);
738 dsi_write(dsi, DSI_CMD_MODE_CFG, CMD_MODE_ALL_LP);
739 dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
740}
741
742/* Get lane byte clock cycles. */
743static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
744 u32 hcomponent)
745{
746 u32 frac, lbcc;
747
748 lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
749
750 frac = lbcc % dsi->mode->clock;
751 lbcc = lbcc / dsi->mode->clock;
752 if (frac)
753 lbcc++;
754
755 return lbcc;
756}
757
758static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi)
759{
760 u32 htotal, hsa, hbp, lbcc;
761 struct drm_display_mode *mode = dsi->mode;
762
763 htotal = mode->htotal;
764 hsa = mode->hsync_end - mode->hsync_start;
765 hbp = mode->htotal - mode->hsync_end;
766
767 lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, htotal);
768 dsi_write(dsi, DSI_VID_HLINE_TIME, lbcc);
769
770 lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, hsa);
771 dsi_write(dsi, DSI_VID_HSA_TIME, lbcc);
772
773 lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, hbp);
774 dsi_write(dsi, DSI_VID_HBP_TIME, lbcc);
775}
776
777static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi)
778{
779 u32 vactive, vsa, vfp, vbp;
780 struct drm_display_mode *mode = dsi->mode;
781
782 vactive = mode->vdisplay;
783 vsa = mode->vsync_end - mode->vsync_start;
784 vfp = mode->vsync_start - mode->vdisplay;
785 vbp = mode->vtotal - mode->vsync_end;
786
787 dsi_write(dsi, DSI_VID_VACTIVE_LINES, vactive);
788 dsi_write(dsi, DSI_VID_VSA_LINES, vsa);
789 dsi_write(dsi, DSI_VID_VFP_LINES, vfp);
790 dsi_write(dsi, DSI_VID_VBP_LINES, vbp);
791}
792
793static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
794{
795 dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40)
796 | PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
797
798 dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
799 | PHY_CLKLP2HS_TIME(0x40));
800}
801
802static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
803{
804 dsi_write(dsi, DSI_PHY_IF_CFG, PHY_STOP_WAIT_TIME(0x20) |
805 N_LANES(dsi->lanes));
806}
807
808static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
809{
810 dsi_read(dsi, DSI_INT_ST0);
811 dsi_read(dsi, DSI_INT_ST1);
812 dsi_write(dsi, DSI_INT_MSK0, 0);
813 dsi_write(dsi, DSI_INT_MSK1, 0);
814}
815
816static void dw_mipi_dsi_encoder_mode_set(struct drm_encoder *encoder,
817 struct drm_display_mode *mode,
818 struct drm_display_mode *adjusted_mode)
819{
820 struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
821 int ret;
822
823 dsi->mode = adjusted_mode;
824
825 ret = dw_mipi_dsi_get_lane_bps(dsi);
826 if (ret < 0)
827 return;
828
829 if (clk_prepare_enable(dsi->pclk)) {
830 dev_err(dsi->dev, "%s: Failed to enable pclk\n", __func__);
831 return;
832 }
833
834 dw_mipi_dsi_init(dsi);
835 dw_mipi_dsi_dpi_config(dsi, mode);
836 dw_mipi_dsi_packet_handler_config(dsi);
837 dw_mipi_dsi_video_mode_config(dsi);
838 dw_mipi_dsi_video_packet_config(dsi, mode);
839 dw_mipi_dsi_command_mode_config(dsi);
840 dw_mipi_dsi_line_timer_config(dsi);
841 dw_mipi_dsi_vertical_timing_config(dsi);
842 dw_mipi_dsi_dphy_timing_config(dsi);
843 dw_mipi_dsi_dphy_interface_config(dsi);
844 dw_mipi_dsi_clear_err(dsi);
845 if (drm_panel_prepare(dsi->panel))
846 dev_err(dsi->dev, "failed to prepare panel\n");
847
848 clk_disable_unprepare(dsi->pclk);
849}
850
851static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
852{
853 struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
854
855 drm_panel_disable(dsi->panel);
856
857 if (clk_prepare_enable(dsi->pclk)) {
858 dev_err(dsi->dev, "%s: Failed to enable pclk\n", __func__);
859 return;
860 }
861
862 dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
863 drm_panel_unprepare(dsi->panel);
864 dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_VID_MODE);
865
866 /*
867 * This is necessary to make sure the peripheral will be driven
868 * normally when the display is enabled again later.
869 */
870 msleep(120);
871
872 dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
873 dw_mipi_dsi_disable(dsi);
874 clk_disable_unprepare(dsi->pclk);
875}
876
877static bool dw_mipi_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
878 const struct drm_display_mode *mode,
879 struct drm_display_mode *adjusted_mode)
880{
881 return true;
882}
883
/*
 * Encoder ->commit hook: bring up the D-PHY, enable the panel in video
 * mode, program the VOP output format to match the DSI pixel format, and
 * route the selected VOP (big/little) to DSI0 via the GRF mux register.
 */
884static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
885{
886 struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
887 int mux = rockchip_drm_encoder_get_mux_id(dsi->dev->of_node, encoder);
888 u32 interface_pix_fmt;
889 u32 val;
890
	/* APB clock must run while touching host/PHY registers. */
891 if (clk_prepare_enable(dsi->pclk)) {
892 dev_err(dsi->dev, "%s: Failed to enable pclk\n", __func__);
893 return;
894 }
895
896 dw_mipi_dsi_phy_init(dsi);
897 dw_mipi_dsi_wait_for_two_frames(dsi);
898
899 dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_VID_MODE);
900 drm_panel_enable(dsi->panel);
901
902 clk_disable_unprepare(dsi->pclk);
903
	/* Map the DSI pixel format to the matching VOP output mode. */
904 switch (dsi->format) {
905 case MIPI_DSI_FMT_RGB888:
906 interface_pix_fmt = ROCKCHIP_OUT_MODE_P888;
907 break;
908 case MIPI_DSI_FMT_RGB666:
909 interface_pix_fmt = ROCKCHIP_OUT_MODE_P666;
910 break;
911 case MIPI_DSI_FMT_RGB565:
912 interface_pix_fmt = ROCKCHIP_OUT_MODE_P565;
913 break;
914 default:
915 WARN_ON(1);
916 return;
917 }
918
919 rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_DSI,
920 interface_pix_fmt);
921
	/*
	 * GRF_SOC_CON6 uses the Rockchip write-mask scheme: the high 16 bits
	 * enable writing of the corresponding low bits, so the "else" value
	 * clears the mux bit (selects the big VOP).
	 */
922 if (mux)
923 val = DSI0_SEL_VOP_LIT | (DSI0_SEL_VOP_LIT << 16);
924 else
925 val = DSI0_SEL_VOP_LIT << 16;
926
927 regmap_write(dsi->grf_regmap, GRF_SOC_CON6, val);
928 dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
929}
930
/* Encoder helper callbacks wired into the DRM modeset helpers. */
931static struct drm_encoder_helper_funcs
932dw_mipi_dsi_encoder_helper_funcs = {
933 .mode_fixup = dw_mipi_dsi_encoder_mode_fixup,
934 .commit = dw_mipi_dsi_encoder_commit,
935 .mode_set = dw_mipi_dsi_encoder_mode_set,
936 .disable = dw_mipi_dsi_encoder_disable,
937};
938
/* Core encoder ops; cleanup only, everything else via the helpers above. */
939static struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
940 .destroy = drm_encoder_cleanup,
941};
942
/*
 * Connector ->get_modes hook: modes come straight from the attached
 * panel; returns the number of modes added.
 */
943static int dw_mipi_dsi_connector_get_modes(struct drm_connector *connector)
944{
945 struct dw_mipi_dsi *dsi = con_to_dsi(connector);
946
947 return drm_panel_get_modes(dsi->panel);
948}
949
/*
 * Connector ->mode_valid hook: defer to the SoC-specific platform-data
 * callback when one is provided (e.g. rk3288 register-width limits);
 * otherwise accept the mode.
 */
950static enum drm_mode_status dw_mipi_dsi_mode_valid(
951 struct drm_connector *connector,
952 struct drm_display_mode *mode)
953{
954 struct dw_mipi_dsi *dsi = con_to_dsi(connector);
955
956 enum drm_mode_status mode_status = MODE_OK;
957
958 if (dsi->pdata->mode_valid)
959 mode_status = dsi->pdata->mode_valid(connector, mode);
960
961 return mode_status;
962}
963
/*
 * Connector ->best_encoder hook: there is exactly one encoder per DSI
 * host, embedded in the driver state, so always return it.
 */
964static struct drm_encoder *dw_mipi_dsi_connector_best_encoder(
965 struct drm_connector *connector)
966{
967 struct dw_mipi_dsi *dsi = con_to_dsi(connector);
968
969 return &dsi->encoder;
970}
971
/* Connector helper callbacks used by the probe/modeset helpers. */
972static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
973 .get_modes = dw_mipi_dsi_connector_get_modes,
974 .mode_valid = dw_mipi_dsi_mode_valid,
975 .best_encoder = dw_mipi_dsi_connector_best_encoder,
976};
977
/*
 * Connector ->detect hook: a DSI panel is hard-wired, so report it as
 * always connected regardless of 'force'.
 */
978static enum drm_connector_status
979dw_mipi_dsi_detect(struct drm_connector *connector, bool force)
980{
981 return connector_status_connected;
982}
983
/* Connector ->destroy hook: unregister from sysfs then free DRM state. */
984static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
985{
986 drm_connector_unregister(connector);
987 drm_connector_cleanup(connector);
988}
989
/* Connector ops using the stock atomic helpers for state management. */
990static struct drm_connector_funcs dw_mipi_dsi_atomic_connector_funcs = {
991 .dpms = drm_atomic_helper_connector_dpms,
992 .fill_modes = drm_helper_probe_single_connector_modes,
993 .detect = dw_mipi_dsi_detect,
994 .destroy = dw_mipi_dsi_drm_connector_destroy,
995 .reset = drm_atomic_helper_connector_reset,
996 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
997 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
998};
999
/*
 * Register the encoder and connector with the DRM core and attach them.
 * Returns 0 on success, -EPROBE_DEFER when the CRTCs are not registered
 * yet, or a negative error code from encoder init.
 */
1000static int dw_mipi_dsi_register(struct drm_device *drm,
1001 struct dw_mipi_dsi *dsi)
1002{
1003 struct drm_encoder *encoder = &dsi->encoder;
1004 struct drm_connector *connector = &dsi->connector;
1005 struct device *dev = dsi->dev;
1006 int ret;
1007
1008 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm,
1009 dev->of_node);
1010 /*
1011 * If we failed to find the CRTC(s) which this encoder is
1012 * supposed to be connected to, it's because the CRTC has
1013 * not been registered yet. Defer probing, and hope that
1014 * the required CRTC is added later.
1015 */
1016 if (encoder->possible_crtcs == 0)
1017 return -EPROBE_DEFER;
1018
1019 drm_encoder_helper_add(&dsi->encoder,
1020 &dw_mipi_dsi_encoder_helper_funcs);
1021 ret = drm_encoder_init(drm, &dsi->encoder, &dw_mipi_dsi_encoder_funcs,
1022 DRM_MODE_ENCODER_DSI, NULL);
1023 if (ret) {
1024 dev_err(dev, "Failed to initialize encoder with drm\n");
1025 return ret;
1026 }
1027
1028 drm_connector_helper_add(connector,
1029 &dw_mipi_dsi_connector_helper_funcs);
1030
	/*
	 * NOTE(review): drm_connector_init() can fail but its return value
	 * is ignored here, and the encoder is not cleaned up on that path —
	 * worth confirming/fixing upstream.
	 */
1031 drm_connector_init(drm, &dsi->connector,
1032 &dw_mipi_dsi_atomic_connector_funcs,
1033 DRM_MODE_CONNECTOR_DSI);
1034
1035 drm_mode_connector_attach_encoder(connector, encoder);
1036
1037 return 0;
1038}
1039
/*
 * Parse device-tree properties: look up the GRF syscon regmap via the
 * "rockchip,grf" phandle (needed for the VOP mux write in commit).
 * Returns 0 on success or the PTR_ERR from the syscon lookup.
 */
1040static int rockchip_mipi_parse_dt(struct dw_mipi_dsi *dsi)
1041{
1042 struct device_node *np = dsi->dev->of_node;
1043
1044 dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
1045 if (IS_ERR(dsi->grf_regmap)) {
1046 dev_err(dsi->dev, "Unable to get rockchip,grf\n");
1047 return PTR_ERR(dsi->grf_regmap);
1048 }
1049
1050 return 0;
1051}
1052
/*
 * rk3288-specific mode filter: reject modes whose active width/height
 * exceed the 11-bit fields of the controller's timing registers.
 */
1053static enum drm_mode_status rk3288_mipi_dsi_mode_valid(
1054 struct drm_connector *connector,
1055 struct drm_display_mode *mode)
1056{
1057 /*
1058 * The VID_PKT_SIZE field in the DSI_VID_PKT_CFG
1059 * register is 11-bit.
1060 */
1061 if (mode->hdisplay > 0x7ff)
1062 return MODE_BAD_HVALUE;
1063
1064 /*
1065 * The V_ACTIVE_LINES field in the DSI_VTIMING_CFG
1066 * register is 11-bit.
1067 */
1068 if (mode->vdisplay > 0x7ff)
1069 return MODE_BAD_VVALUE;
1070
1071 return MODE_OK;
1072}
1073
/* rk3288 platform data: 4 data lanes max, plus the mode filter above. */
1074static struct dw_mipi_dsi_plat_data rk3288_mipi_dsi_drv_data = {
1075 .max_data_lanes = 4,
1076 .mode_valid = rk3288_mipi_dsi_mode_valid,
1077};
1078
/* Device-tree match table; .data carries the per-SoC platform data. */
1079static const struct of_device_id dw_mipi_dsi_dt_ids[] = {
1080 {
1081 .compatible = "rockchip,rk3288-mipi-dsi",
1082 .data = &rk3288_mipi_dsi_drv_data,
1083 },
1084 { /* sentinel */ }
1085};
1086MODULE_DEVICE_TABLE(of, dw_mipi_dsi_dt_ids);
1087
/*
 * Component ->bind: allocate driver state, map registers, acquire the
 * "ref" (PLL reference) and "pclk" clocks, register the DRM encoder /
 * connector, then register as a MIPI DSI host. The pllref clock stays
 * enabled for the lifetime of the binding (released in ->unbind).
 */
1088static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
1089 void *data)
1090{
1091 const struct of_device_id *of_id =
1092 of_match_device(dw_mipi_dsi_dt_ids, dev);
1093 const struct dw_mipi_dsi_plat_data *pdata = of_id->data;
1094 struct platform_device *pdev = to_platform_device(dev);
1095 struct drm_device *drm = data;
1096 struct dw_mipi_dsi *dsi;
1097 struct resource *res;
1098 int ret;
1099
1100 dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
1101 if (!dsi)
1102 return -ENOMEM;
1103
1104 dsi->dev = dev;
1105 dsi->pdata = pdata;
1106
1107 ret = rockchip_mipi_parse_dt(dsi);
1108 if (ret)
1109 return ret;
1110
1111 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1112 if (!res)
1113 return -ENODEV;
1114
1115 dsi->base = devm_ioremap_resource(dev, res);
1116 if (IS_ERR(dsi->base))
1117 return PTR_ERR(dsi->base);
1118
1119 dsi->pllref_clk = devm_clk_get(dev, "ref");
1120 if (IS_ERR(dsi->pllref_clk)) {
1121 ret = PTR_ERR(dsi->pllref_clk);
1122 dev_err(dev, "Unable to get pll reference clock: %d\n", ret);
1123 return ret;
1124 }
1125
1126 dsi->pclk = devm_clk_get(dev, "pclk");
1127 if (IS_ERR(dsi->pclk)) {
1128 ret = PTR_ERR(dsi->pclk);
1129 dev_err(dev, "Unable to get pclk: %d\n", ret);
1130 return ret;
1131 }
1132
1133 ret = clk_prepare_enable(dsi->pllref_clk);
1134 if (ret) {
1135 dev_err(dev, "%s: Failed to enable pllref_clk\n", __func__);
1136 return ret;
1137 }
1138
1139 ret = dw_mipi_dsi_register(drm, dsi);
1140 if (ret) {
1141 dev_err(dev, "Failed to register mipi_dsi: %d\n", ret);
1142 goto err_pllref;
1143 }
1144
1145 dev_set_drvdata(dev, dsi);
1146
1147 dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
1148 dsi->dsi_host.dev = dev;
	/*
	 * NOTE(review): if mipi_dsi_host_register() fails, the encoder,
	 * connector and pllref clock are not unwound here — worth confirming
	 * whether an error path mirroring err_pllref is needed.
	 */
1149 return mipi_dsi_host_register(&dsi->dsi_host);
1150
1151err_pllref:
1152 clk_disable_unprepare(dsi->pllref_clk);
1153 return ret;
1154}
1155
/*
 * Component ->unbind: tear down in reverse of ->bind — unregister the
 * DSI host and release the PLL reference clock held since bind.
 */
1156static void dw_mipi_dsi_unbind(struct device *dev, struct device *master,
1157 void *data)
1158{
1159 struct dw_mipi_dsi *dsi = dev_get_drvdata(dev);
1160
1161 mipi_dsi_host_unregister(&dsi->dsi_host);
1162 clk_disable_unprepare(dsi->pllref_clk);
1163}
1164
/* Component ops tying this device into the master DRM device's bind. */
1165static const struct component_ops dw_mipi_dsi_ops = {
1166 .bind = dw_mipi_dsi_bind,
1167 .unbind = dw_mipi_dsi_unbind,
1168};
1169
/* Platform probe: just register with the component framework. */
1170static int dw_mipi_dsi_probe(struct platform_device *pdev)
1171{
1172 return component_add(&pdev->dev, &dw_mipi_dsi_ops);
1173}
1174
/* Platform remove: drop the component registration. */
1175static int dw_mipi_dsi_remove(struct platform_device *pdev)
1176{
1177 component_del(&pdev->dev, &dw_mipi_dsi_ops);
1178 return 0;
1179}
1180
/* Platform driver glue and module metadata. */
1181static struct platform_driver dw_mipi_dsi_driver = {
1182 .probe = dw_mipi_dsi_probe,
1183 .remove = dw_mipi_dsi_remove,
1184 .driver = {
1185 .of_match_table = dw_mipi_dsi_dt_ids,
1186 .name = DRIVER_NAME,
1187 },
1188};
1189module_platform_driver(dw_mipi_dsi_driver);
1190
1191MODULE_DESCRIPTION("ROCKCHIP MIPI DSI host controller driver");
1192MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
1193MODULE_LICENSE("GPL");
1194MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 80d6fc8a5cee..c65ce8cb30d3 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -173,7 +173,7 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
173 return (valid) ? MODE_OK : MODE_BAD; 173 return (valid) ? MODE_OK : MODE_BAD;
174} 174}
175 175
176static struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = { 176static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
177 .destroy = drm_encoder_cleanup, 177 .destroy = drm_encoder_cleanup,
178}; 178};
179 179
@@ -195,12 +195,15 @@ static void dw_hdmi_rockchip_encoder_mode_set(struct drm_encoder *encoder,
195{ 195{
196} 196}
197 197
198static void dw_hdmi_rockchip_encoder_commit(struct drm_encoder *encoder) 198static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
199{ 199{
200 struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder); 200 struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
201 u32 val; 201 u32 val;
202 int mux; 202 int mux;
203 203
204 rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
205 ROCKCHIP_OUT_MODE_AAAA);
206
204 mux = rockchip_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder); 207 mux = rockchip_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
205 if (mux) 208 if (mux)
206 val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16); 209 val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
@@ -212,17 +215,10 @@ static void dw_hdmi_rockchip_encoder_commit(struct drm_encoder *encoder)
212 (mux) ? "LIT" : "BIG"); 215 (mux) ? "LIT" : "BIG");
213} 216}
214 217
215static void dw_hdmi_rockchip_encoder_prepare(struct drm_encoder *encoder) 218static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
216{
217 rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
218 ROCKCHIP_OUT_MODE_AAAA);
219}
220
221static struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
222 .mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup, 219 .mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
223 .mode_set = dw_hdmi_rockchip_encoder_mode_set, 220 .mode_set = dw_hdmi_rockchip_encoder_mode_set,
224 .prepare = dw_hdmi_rockchip_encoder_prepare, 221 .enable = dw_hdmi_rockchip_encoder_enable,
225 .commit = dw_hdmi_rockchip_encoder_commit,
226 .disable = dw_hdmi_rockchip_encoder_disable, 222 .disable = dw_hdmi_rockchip_encoder_disable,
227}; 223};
228 224
@@ -295,7 +291,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
295 291
296 drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); 292 drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
297 drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, 293 drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
298 DRM_MODE_ENCODER_TMDS); 294 DRM_MODE_ENCODER_TMDS, NULL);
299 295
300 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); 296 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
301} 297}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index f22e1e1ee64a..8397d1b62ef9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -64,11 +64,11 @@ void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
64} 64}
65EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device); 65EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
66 66
67int rockchip_register_crtc_funcs(struct drm_device *dev, 67int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
68 const struct rockchip_crtc_funcs *crtc_funcs, 68 const struct rockchip_crtc_funcs *crtc_funcs)
69 int pipe)
70{ 69{
71 struct rockchip_drm_private *priv = dev->dev_private; 70 int pipe = drm_crtc_index(crtc);
71 struct rockchip_drm_private *priv = crtc->dev->dev_private;
72 72
73 if (pipe > ROCKCHIP_MAX_CRTC) 73 if (pipe > ROCKCHIP_MAX_CRTC)
74 return -EINVAL; 74 return -EINVAL;
@@ -79,9 +79,10 @@ int rockchip_register_crtc_funcs(struct drm_device *dev,
79} 79}
80EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs); 80EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
81 81
82void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe) 82void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
83{ 83{
84 struct rockchip_drm_private *priv = dev->dev_private; 84 int pipe = drm_crtc_index(crtc);
85 struct rockchip_drm_private *priv = crtc->dev->dev_private;
85 86
86 if (pipe > ROCKCHIP_MAX_CRTC) 87 if (pipe > ROCKCHIP_MAX_CRTC)
87 return; 88 return;
@@ -139,6 +140,9 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
139 if (!private) 140 if (!private)
140 return -ENOMEM; 141 return -ENOMEM;
141 142
143 mutex_init(&private->commit.lock);
144 INIT_WORK(&private->commit.work, rockchip_drm_atomic_work);
145
142 drm_dev->dev_private = private; 146 drm_dev->dev_private = private;
143 147
144 drm_mode_config_init(drm_dev); 148 drm_mode_config_init(drm_dev);
@@ -212,6 +216,8 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
212 */ 216 */
213 drm_dev->vblank_disable_allowed = true; 217 drm_dev->vblank_disable_allowed = true;
214 218
219 drm_mode_config_reset(drm_dev);
220
215 ret = rockchip_drm_fbdev_init(drm_dev); 221 ret = rockchip_drm_fbdev_init(drm_dev);
216 if (ret) 222 if (ret)
217 goto err_vblank_cleanup; 223 goto err_vblank_cleanup;
@@ -275,7 +281,8 @@ const struct vm_operations_struct rockchip_drm_vm_ops = {
275}; 281};
276 282
277static struct drm_driver rockchip_drm_driver = { 283static struct drm_driver rockchip_drm_driver = {
278 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 284 .driver_features = DRIVER_MODESET | DRIVER_GEM |
285 DRIVER_PRIME | DRIVER_ATOMIC,
279 .load = rockchip_drm_load, 286 .load = rockchip_drm_load,
280 .unload = rockchip_drm_unload, 287 .unload = rockchip_drm_unload,
281 .lastclose = rockchip_drm_lastclose, 288 .lastclose = rockchip_drm_lastclose,
@@ -450,10 +457,6 @@ static int rockchip_drm_bind(struct device *dev)
450 if (!drm) 457 if (!drm)
451 return -ENOMEM; 458 return -ENOMEM;
452 459
453 ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
454 if (ret)
455 goto err_free;
456
457 ret = drm_dev_register(drm, 0); 460 ret = drm_dev_register(drm, 0);
458 if (ret) 461 if (ret)
459 goto err_free; 462 goto err_free;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index dc4e5f03ac79..bb8b076f1dbb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -18,6 +18,7 @@
18#define _ROCKCHIP_DRM_DRV_H 18#define _ROCKCHIP_DRM_DRV_H
19 19
20#include <drm/drm_fb_helper.h> 20#include <drm/drm_fb_helper.h>
21#include <drm/drm_atomic_helper.h>
21#include <drm/drm_gem.h> 22#include <drm/drm_gem.h>
22 23
23#include <linux/module.h> 24#include <linux/module.h>
@@ -38,6 +39,14 @@ struct drm_connector;
38struct rockchip_crtc_funcs { 39struct rockchip_crtc_funcs {
39 int (*enable_vblank)(struct drm_crtc *crtc); 40 int (*enable_vblank)(struct drm_crtc *crtc);
40 void (*disable_vblank)(struct drm_crtc *crtc); 41 void (*disable_vblank)(struct drm_crtc *crtc);
42 void (*wait_for_update)(struct drm_crtc *crtc);
43};
44
45struct rockchip_atomic_commit {
46 struct work_struct work;
47 struct drm_atomic_state *state;
48 struct drm_device *dev;
49 struct mutex lock;
41}; 50};
42 51
43/* 52/*
@@ -50,12 +59,14 @@ struct rockchip_drm_private {
50 struct drm_fb_helper fbdev_helper; 59 struct drm_fb_helper fbdev_helper;
51 struct drm_gem_object *fbdev_bo; 60 struct drm_gem_object *fbdev_bo;
52 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; 61 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
62
63 struct rockchip_atomic_commit commit;
53}; 64};
54 65
55int rockchip_register_crtc_funcs(struct drm_device *dev, 66void rockchip_drm_atomic_work(struct work_struct *work);
56 const struct rockchip_crtc_funcs *crtc_funcs, 67int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
57 int pipe); 68 const struct rockchip_crtc_funcs *crtc_funcs);
58void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe); 69void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
59int rockchip_drm_encoder_get_mux_id(struct device_node *node, 70int rockchip_drm_encoder_get_mux_id(struct device_node *node,
60 struct drm_encoder *encoder); 71 struct drm_encoder *encoder);
61int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type, 72int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
@@ -64,5 +75,4 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
64 struct device *dev); 75 struct device *dev);
65void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, 76void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
66 struct device *dev); 77 struct device *dev);
67
68#endif /* _ROCKCHIP_DRM_DRV_H_ */ 78#endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index b8ac5911c102..f7844883cb76 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <drm/drm.h> 16#include <drm/drm.h>
17#include <drm/drmP.h> 17#include <drm/drmP.h>
18#include <drm/drm_atomic.h>
18#include <drm/drm_fb_helper.h> 19#include <drm/drm_fb_helper.h>
19#include <drm/drm_crtc_helper.h> 20#include <drm/drm_crtc_helper.h>
20 21
@@ -66,7 +67,7 @@ static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
66 rockchip_fb->obj[0], handle); 67 rockchip_fb->obj[0], handle);
67} 68}
68 69
69static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = { 70static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
70 .destroy = rockchip_drm_fb_destroy, 71 .destroy = rockchip_drm_fb_destroy,
71 .create_handle = rockchip_drm_fb_create_handle, 72 .create_handle = rockchip_drm_fb_create_handle,
72}; 73};
@@ -166,9 +167,131 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
166 drm_fb_helper_hotplug_event(fb_helper); 167 drm_fb_helper_hotplug_event(fb_helper);
167} 168}
168 169
170static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
171{
172 struct rockchip_drm_private *priv = crtc->dev->dev_private;
173 int pipe = drm_crtc_index(crtc);
174 const struct rockchip_crtc_funcs *crtc_funcs = priv->crtc_funcs[pipe];
175
176 if (crtc_funcs && crtc_funcs->wait_for_update)
177 crtc_funcs->wait_for_update(crtc);
178}
179
180static void
181rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
182{
183 struct drm_crtc_state *old_crtc_state;
184 struct drm_crtc *crtc;
185 int i, ret;
186
187 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
188 /* No one cares about the old state, so abuse it for tracking
189 * and store whether we hold a vblank reference (and should do a
190 * vblank wait) in the ->enable boolean.
191 */
192 old_crtc_state->enable = false;
193
194 if (!crtc->state->active)
195 continue;
196
197 ret = drm_crtc_vblank_get(crtc);
198 if (ret != 0)
199 continue;
200
201 old_crtc_state->enable = true;
202 }
203
204 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
205 if (!old_crtc_state->enable)
206 continue;
207
208 rockchip_crtc_wait_for_update(crtc);
209 drm_crtc_vblank_put(crtc);
210 }
211}
212
213static void
214rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
215{
216 struct drm_atomic_state *state = commit->state;
217 struct drm_device *dev = commit->dev;
218
219 /*
220 * TODO: do fence wait here.
221 */
222
223 /*
224 * Rockchip crtc support runtime PM, can't update display planes
225 * when crtc is disabled.
226 *
227 * drm_atomic_helper_commit comments detail that:
228 * For drivers supporting runtime PM the recommended sequence is
229 *
230 * drm_atomic_helper_commit_modeset_disables(dev, state);
231 *
232 * drm_atomic_helper_commit_modeset_enables(dev, state);
233 *
234 * drm_atomic_helper_commit_planes(dev, state, true);
235 *
236 * See the kerneldoc entries for these three functions for more details.
237 */
238 drm_atomic_helper_commit_modeset_disables(dev, state);
239
240 drm_atomic_helper_commit_modeset_enables(dev, state);
241
242 drm_atomic_helper_commit_planes(dev, state, true);
243
244 rockchip_atomic_wait_for_complete(state);
245
246 drm_atomic_helper_cleanup_planes(dev, state);
247
248 drm_atomic_state_free(state);
249}
250
251void rockchip_drm_atomic_work(struct work_struct *work)
252{
253 struct rockchip_atomic_commit *commit = container_of(work,
254 struct rockchip_atomic_commit, work);
255
256 rockchip_atomic_commit_complete(commit);
257}
258
259int rockchip_drm_atomic_commit(struct drm_device *dev,
260 struct drm_atomic_state *state,
261 bool async)
262{
263 struct rockchip_drm_private *private = dev->dev_private;
264 struct rockchip_atomic_commit *commit = &private->commit;
265 int ret;
266
267 ret = drm_atomic_helper_prepare_planes(dev, state);
268 if (ret)
269 return ret;
270
271 /* serialize outstanding asynchronous commits */
272 mutex_lock(&commit->lock);
273 flush_work(&commit->work);
274
275 drm_atomic_helper_swap_state(dev, state);
276
277 commit->dev = dev;
278 commit->state = state;
279
280 if (async)
281 schedule_work(&commit->work);
282 else
283 rockchip_atomic_commit_complete(commit);
284
285 mutex_unlock(&commit->lock);
286
287 return 0;
288}
289
169static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 290static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
170 .fb_create = rockchip_user_fb_create, 291 .fb_create = rockchip_user_fb_create,
171 .output_poll_changed = rockchip_drm_output_poll_changed, 292 .output_poll_changed = rockchip_drm_output_poll_changed,
293 .atomic_check = drm_atomic_helper_check,
294 .atomic_commit = rockchip_drm_atomic_commit,
172}; 295};
173 296
174struct drm_framebuffer * 297struct drm_framebuffer *
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 03c47eeadc81..46c2a8dfd8aa 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -14,6 +14,7 @@
14 14
15#include <drm/drm.h> 15#include <drm/drm.h>
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_atomic.h>
17#include <drm/drm_crtc.h> 18#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h> 19#include <drm/drm_crtc_helper.h>
19#include <drm/drm_plane_helper.h> 20#include <drm/drm_plane_helper.h>
@@ -35,11 +36,6 @@
35#include "rockchip_drm_fb.h" 36#include "rockchip_drm_fb.h"
36#include "rockchip_drm_vop.h" 37#include "rockchip_drm_vop.h"
37 38
38#define VOP_REG(off, _mask, s) \
39 {.offset = off, \
40 .mask = _mask, \
41 .shift = s,}
42
43#define __REG_SET_RELAXED(x, off, mask, shift, v) \ 39#define __REG_SET_RELAXED(x, off, mask, shift, v) \
44 vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift) 40 vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
45#define __REG_SET_NORMAL(x, off, mask, shift, v) \ 41#define __REG_SET_NORMAL(x, off, mask, shift, v) \
@@ -47,14 +43,35 @@
47 43
48#define REG_SET(x, base, reg, v, mode) \ 44#define REG_SET(x, base, reg, v, mode) \
49 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) 45 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
46#define REG_SET_MASK(x, base, reg, v, mode) \
47 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
50 48
51#define VOP_WIN_SET(x, win, name, v) \ 49#define VOP_WIN_SET(x, win, name, v) \
52 REG_SET(x, win->base, win->phy->name, v, RELAXED) 50 REG_SET(x, win->base, win->phy->name, v, RELAXED)
53#define VOP_SCL_SET(x, win, name, v) \ 51#define VOP_SCL_SET(x, win, name, v) \
54 REG_SET(x, win->base, win->phy->scl->name, v, RELAXED) 52 REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
53#define VOP_SCL_SET_EXT(x, win, name, v) \
54 REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
55#define VOP_CTRL_SET(x, name, v) \ 55#define VOP_CTRL_SET(x, name, v) \
56 REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL) 56 REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
57 57
58#define VOP_INTR_GET(vop, name) \
59 vop_read_reg(vop, 0, &vop->data->ctrl->name)
60
61#define VOP_INTR_SET(vop, name, v) \
62 REG_SET(vop, 0, vop->data->intr->name, v, NORMAL)
63#define VOP_INTR_SET_TYPE(vop, name, type, v) \
64 do { \
65 int i, reg = 0; \
66 for (i = 0; i < vop->data->intr->nintrs; i++) { \
67 if (vop->data->intr->intrs[i] & type) \
68 reg |= (v) << i; \
69 } \
70 VOP_INTR_SET(vop, name, reg); \
71 } while (0)
72#define VOP_INTR_GET_TYPE(vop, name, type) \
73 vop_get_intr_type(vop, &vop->data->intr->name, type)
74
58#define VOP_WIN_GET(x, win, name) \ 75#define VOP_WIN_GET(x, win, name) \
59 vop_read_reg(x, win->base, &win->phy->name) 76 vop_read_reg(x, win->base, &win->phy->name)
60 77
@@ -63,12 +80,15 @@
63 80
64#define to_vop(x) container_of(x, struct vop, crtc) 81#define to_vop(x) container_of(x, struct vop, crtc)
65#define to_vop_win(x) container_of(x, struct vop_win, base) 82#define to_vop_win(x) container_of(x, struct vop_win, base)
83#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
66 84
67struct vop_win_state { 85struct vop_plane_state {
68 struct list_head head; 86 struct drm_plane_state base;
69 struct drm_framebuffer *fb; 87 int format;
88 struct drm_rect src;
89 struct drm_rect dest;
70 dma_addr_t yrgb_mst; 90 dma_addr_t yrgb_mst;
71 struct drm_pending_vblank_event *event; 91 bool enable;
72}; 92};
73 93
74struct vop_win { 94struct vop_win {
@@ -76,8 +96,7 @@ struct vop_win {
76 const struct vop_win_data *data; 96 const struct vop_win_data *data;
77 struct vop *vop; 97 struct vop *vop;
78 98
79 struct list_head pending; 99 struct vop_plane_state state;
80 struct vop_win_state *active;
81}; 100};
82 101
83struct vop { 102struct vop {
@@ -86,13 +105,12 @@ struct vop {
86 struct drm_device *drm_dev; 105 struct drm_device *drm_dev;
87 bool is_enabled; 106 bool is_enabled;
88 107
89 int connector_type;
90 int connector_out_mode;
91
92 /* mutex vsync_ work */ 108 /* mutex vsync_ work */
93 struct mutex vsync_mutex; 109 struct mutex vsync_mutex;
94 bool vsync_work_pending; 110 bool vsync_work_pending;
95 struct completion dsp_hold_completion; 111 struct completion dsp_hold_completion;
112 struct completion wait_update_complete;
113 struct drm_pending_vblank_event *event;
96 114
97 const struct vop_data *data; 115 const struct vop_data *data;
98 116
@@ -119,263 +137,9 @@ struct vop {
119 /* vop dclk reset */ 137 /* vop dclk reset */
120 struct reset_control *dclk_rst; 138 struct reset_control *dclk_rst;
121 139
122 int pipe;
123
124 struct vop_win win[]; 140 struct vop_win win[];
125}; 141};
126 142
127enum vop_data_format {
128 VOP_FMT_ARGB8888 = 0,
129 VOP_FMT_RGB888,
130 VOP_FMT_RGB565,
131 VOP_FMT_YUV420SP = 4,
132 VOP_FMT_YUV422SP,
133 VOP_FMT_YUV444SP,
134};
135
136struct vop_reg_data {
137 uint32_t offset;
138 uint32_t value;
139};
140
141struct vop_reg {
142 uint32_t offset;
143 uint32_t shift;
144 uint32_t mask;
145};
146
147struct vop_ctrl {
148 struct vop_reg standby;
149 struct vop_reg data_blank;
150 struct vop_reg gate_en;
151 struct vop_reg mmu_en;
152 struct vop_reg rgb_en;
153 struct vop_reg edp_en;
154 struct vop_reg hdmi_en;
155 struct vop_reg mipi_en;
156 struct vop_reg out_mode;
157 struct vop_reg dither_down;
158 struct vop_reg dither_up;
159 struct vop_reg pin_pol;
160
161 struct vop_reg htotal_pw;
162 struct vop_reg hact_st_end;
163 struct vop_reg vtotal_pw;
164 struct vop_reg vact_st_end;
165 struct vop_reg hpost_st_end;
166 struct vop_reg vpost_st_end;
167};
168
169struct vop_scl_regs {
170 struct vop_reg cbcr_vsd_mode;
171 struct vop_reg cbcr_vsu_mode;
172 struct vop_reg cbcr_hsd_mode;
173 struct vop_reg cbcr_ver_scl_mode;
174 struct vop_reg cbcr_hor_scl_mode;
175 struct vop_reg yrgb_vsd_mode;
176 struct vop_reg yrgb_vsu_mode;
177 struct vop_reg yrgb_hsd_mode;
178 struct vop_reg yrgb_ver_scl_mode;
179 struct vop_reg yrgb_hor_scl_mode;
180 struct vop_reg line_load_mode;
181 struct vop_reg cbcr_axi_gather_num;
182 struct vop_reg yrgb_axi_gather_num;
183 struct vop_reg vsd_cbcr_gt2;
184 struct vop_reg vsd_cbcr_gt4;
185 struct vop_reg vsd_yrgb_gt2;
186 struct vop_reg vsd_yrgb_gt4;
187 struct vop_reg bic_coe_sel;
188 struct vop_reg cbcr_axi_gather_en;
189 struct vop_reg yrgb_axi_gather_en;
190
191 struct vop_reg lb_mode;
192 struct vop_reg scale_yrgb_x;
193 struct vop_reg scale_yrgb_y;
194 struct vop_reg scale_cbcr_x;
195 struct vop_reg scale_cbcr_y;
196};
197
198struct vop_win_phy {
199 const struct vop_scl_regs *scl;
200 const uint32_t *data_formats;
201 uint32_t nformats;
202
203 struct vop_reg enable;
204 struct vop_reg format;
205 struct vop_reg rb_swap;
206 struct vop_reg act_info;
207 struct vop_reg dsp_info;
208 struct vop_reg dsp_st;
209 struct vop_reg yrgb_mst;
210 struct vop_reg uv_mst;
211 struct vop_reg yrgb_vir;
212 struct vop_reg uv_vir;
213
214 struct vop_reg dst_alpha_ctl;
215 struct vop_reg src_alpha_ctl;
216};
217
218struct vop_win_data {
219 uint32_t base;
220 const struct vop_win_phy *phy;
221 enum drm_plane_type type;
222};
223
224struct vop_data {
225 const struct vop_reg_data *init_table;
226 unsigned int table_size;
227 const struct vop_ctrl *ctrl;
228 const struct vop_win_data *win;
229 unsigned int win_size;
230};
231
232static const uint32_t formats_01[] = {
233 DRM_FORMAT_XRGB8888,
234 DRM_FORMAT_ARGB8888,
235 DRM_FORMAT_XBGR8888,
236 DRM_FORMAT_ABGR8888,
237 DRM_FORMAT_RGB888,
238 DRM_FORMAT_BGR888,
239 DRM_FORMAT_RGB565,
240 DRM_FORMAT_BGR565,
241 DRM_FORMAT_NV12,
242 DRM_FORMAT_NV16,
243 DRM_FORMAT_NV24,
244};
245
246static const uint32_t formats_234[] = {
247 DRM_FORMAT_XRGB8888,
248 DRM_FORMAT_ARGB8888,
249 DRM_FORMAT_XBGR8888,
250 DRM_FORMAT_ABGR8888,
251 DRM_FORMAT_RGB888,
252 DRM_FORMAT_BGR888,
253 DRM_FORMAT_RGB565,
254 DRM_FORMAT_BGR565,
255};
256
257static const struct vop_scl_regs win_full_scl = {
258 .cbcr_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 31),
259 .cbcr_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 30),
260 .cbcr_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 28),
261 .cbcr_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 26),
262 .cbcr_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 24),
263 .yrgb_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 23),
264 .yrgb_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 22),
265 .yrgb_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 20),
266 .yrgb_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 18),
267 .yrgb_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 16),
268 .line_load_mode = VOP_REG(WIN0_CTRL1, 0x1, 15),
269 .cbcr_axi_gather_num = VOP_REG(WIN0_CTRL1, 0x7, 12),
270 .yrgb_axi_gather_num = VOP_REG(WIN0_CTRL1, 0xf, 8),
271 .vsd_cbcr_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 7),
272 .vsd_cbcr_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 6),
273 .vsd_yrgb_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 5),
274 .vsd_yrgb_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 4),
275 .bic_coe_sel = VOP_REG(WIN0_CTRL1, 0x3, 2),
276 .cbcr_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 1),
277 .yrgb_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 0),
278 .lb_mode = VOP_REG(WIN0_CTRL0, 0x7, 5),
279 .scale_yrgb_x = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
280 .scale_yrgb_y = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
281 .scale_cbcr_x = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
282 .scale_cbcr_y = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 16),
283};
284
285static const struct vop_win_phy win01_data = {
286 .scl = &win_full_scl,
287 .data_formats = formats_01,
288 .nformats = ARRAY_SIZE(formats_01),
289 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
290 .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
291 .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
292 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
293 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
294 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
295 .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
296 .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
297 .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
298 .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
299 .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
300 .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
301};
302
303static const struct vop_win_phy win23_data = {
304 .data_formats = formats_234,
305 .nformats = ARRAY_SIZE(formats_234),
306 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
307 .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
308 .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
309 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
310 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
311 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
312 .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
313 .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
314 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
315};
316
317static const struct vop_ctrl ctrl_data = {
318 .standby = VOP_REG(SYS_CTRL, 0x1, 22),
319 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
320 .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
321 .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
322 .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
323 .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
324 .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
325 .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
326 .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
327 .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
328 .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
329 .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
330 .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
331 .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
332 .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
333 .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
334 .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
335 .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
336};
337
338static const struct vop_reg_data vop_init_reg_table[] = {
339 {SYS_CTRL, 0x00c00000},
340 {DSP_CTRL0, 0x00000000},
341 {WIN0_CTRL0, 0x00000080},
342 {WIN1_CTRL0, 0x00000080},
343 /* TODO: Win2/3 support multiple area function, but we haven't found
344 * a suitable way to use it yet, so let's just use them as other windows
345 * with only area 0 enabled.
346 */
347 {WIN2_CTRL0, 0x00000010},
348 {WIN3_CTRL0, 0x00000010},
349};
350
351/*
352 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
353 * special support to get alpha blending working. For now, just use overlay
354 * window 3 for the drm cursor.
355 *
356 */
357static const struct vop_win_data rk3288_vop_win_data[] = {
358 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
359 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
360 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
361 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
362};
363
364static const struct vop_data rk3288_vop = {
365 .init_table = vop_init_reg_table,
366 .table_size = ARRAY_SIZE(vop_init_reg_table),
367 .ctrl = &ctrl_data,
368 .win = rk3288_vop_win_data,
369 .win_size = ARRAY_SIZE(rk3288_vop_win_data),
370};
371
372static const struct of_device_id vop_driver_dt_match[] = {
373 { .compatible = "rockchip,rk3288-vop",
374 .data = &rk3288_vop },
375 {},
376};
377MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
378
379static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) 143static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
380{ 144{
381 writel(v, vop->regs + offset); 145 writel(v, vop->regs + offset);
@@ -393,11 +157,6 @@ static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
393 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask; 157 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
394} 158}
395 159
396static inline void vop_cfg_done(struct vop *vop)
397{
398 writel(0x01, vop->regs + REG_CFG_DONE);
399}
400
401static inline void vop_mask_write(struct vop *vop, uint32_t offset, 160static inline void vop_mask_write(struct vop *vop, uint32_t offset,
402 uint32_t mask, uint32_t v) 161 uint32_t mask, uint32_t v)
403{ 162{
@@ -422,6 +181,25 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
422 } 181 }
423} 182}
424 183
184static inline uint32_t vop_get_intr_type(struct vop *vop,
185 const struct vop_reg *reg, int type)
186{
187 uint32_t i, ret = 0;
188 uint32_t regs = vop_read_reg(vop, 0, reg);
189
190 for (i = 0; i < vop->data->intr->nintrs; i++) {
191 if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
192 ret |= vop->data->intr->intrs[i];
193 }
194
195 return ret;
196}
197
198static inline void vop_cfg_done(struct vop *vop)
199{
200 VOP_CTRL_SET(vop, cfg_done, 1);
201}
202
425static bool has_rb_swapped(uint32_t format) 203static bool has_rb_swapped(uint32_t format)
426{ 204{
427 switch (format) { 205 switch (format) {
@@ -537,6 +315,20 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
537 return; 315 return;
538 } 316 }
539 317
318 if (!win->phy->scl->ext) {
319 VOP_SCL_SET(vop, win, scale_yrgb_x,
320 scl_cal_scale2(src_w, dst_w));
321 VOP_SCL_SET(vop, win, scale_yrgb_y,
322 scl_cal_scale2(src_h, dst_h));
323 if (is_yuv) {
324 VOP_SCL_SET(vop, win, scale_cbcr_x,
325 scl_cal_scale2(src_w, dst_w));
326 VOP_SCL_SET(vop, win, scale_cbcr_y,
327 scl_cal_scale2(src_h, dst_h));
328 }
329 return;
330 }
331
540 yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w); 332 yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
541 yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h); 333 yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
542 334
@@ -554,7 +346,7 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
554 lb_mode = scl_vop_cal_lb_mode(src_w, false); 346 lb_mode = scl_vop_cal_lb_mode(src_w, false);
555 } 347 }
556 348
557 VOP_SCL_SET(vop, win, lb_mode, lb_mode); 349 VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
558 if (lb_mode == LB_RGB_3840X2) { 350 if (lb_mode == LB_RGB_3840X2) {
559 if (yrgb_ver_scl_mode != SCALE_NONE) { 351 if (yrgb_ver_scl_mode != SCALE_NONE) {
560 DRM_ERROR("ERROR : not allow yrgb ver scale\n"); 352 DRM_ERROR("ERROR : not allow yrgb ver scale\n");
@@ -578,14 +370,14 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
578 false, vsu_mode, &vskiplines); 370 false, vsu_mode, &vskiplines);
579 VOP_SCL_SET(vop, win, scale_yrgb_y, val); 371 VOP_SCL_SET(vop, win, scale_yrgb_y, val);
580 372
581 VOP_SCL_SET(vop, win, vsd_yrgb_gt4, vskiplines == 4); 373 VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
582 VOP_SCL_SET(vop, win, vsd_yrgb_gt2, vskiplines == 2); 374 VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);
583 375
584 VOP_SCL_SET(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode); 376 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
585 VOP_SCL_SET(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode); 377 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
586 VOP_SCL_SET(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL); 378 VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
587 VOP_SCL_SET(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL); 379 VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
588 VOP_SCL_SET(vop, win, yrgb_vsu_mode, vsu_mode); 380 VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
589 if (is_yuv) { 381 if (is_yuv) {
590 val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w, 382 val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
591 dst_w, true, 0, NULL); 383 dst_w, true, 0, NULL);
@@ -594,13 +386,13 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
594 dst_h, false, vsu_mode, &vskiplines); 386 dst_h, false, vsu_mode, &vskiplines);
595 VOP_SCL_SET(vop, win, scale_cbcr_y, val); 387 VOP_SCL_SET(vop, win, scale_cbcr_y, val);
596 388
597 VOP_SCL_SET(vop, win, vsd_cbcr_gt4, vskiplines == 4); 389 VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
598 VOP_SCL_SET(vop, win, vsd_cbcr_gt2, vskiplines == 2); 390 VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
599 VOP_SCL_SET(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode); 391 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
600 VOP_SCL_SET(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode); 392 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
601 VOP_SCL_SET(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL); 393 VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
602 VOP_SCL_SET(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL); 394 VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
603 VOP_SCL_SET(vop, win, cbcr_vsu_mode, vsu_mode); 395 VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
604 } 396 }
605} 397}
606 398
@@ -613,8 +405,7 @@ static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
613 405
614 spin_lock_irqsave(&vop->irq_lock, flags); 406 spin_lock_irqsave(&vop->irq_lock, flags);
615 407
616 vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK, 408 VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
617 DSP_HOLD_VALID_INTR_EN(1));
618 409
619 spin_unlock_irqrestore(&vop->irq_lock, flags); 410 spin_unlock_irqrestore(&vop->irq_lock, flags);
620} 411}
@@ -628,8 +419,7 @@ static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
628 419
629 spin_lock_irqsave(&vop->irq_lock, flags); 420 spin_lock_irqsave(&vop->irq_lock, flags);
630 421
631 vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK, 422 VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);
632 DSP_HOLD_VALID_INTR_EN(0));
633 423
634 spin_unlock_irqrestore(&vop->irq_lock, flags); 424 spin_unlock_irqrestore(&vop->irq_lock, flags);
635} 425}
@@ -692,7 +482,7 @@ static void vop_enable(struct drm_crtc *crtc)
692 482
693 enable_irq(vop->irq); 483 enable_irq(vop->irq);
694 484
695 drm_vblank_on(vop->drm_dev, vop->pipe); 485 drm_crtc_vblank_on(crtc);
696 486
697 return; 487 return;
698 488
@@ -704,14 +494,14 @@ err_disable_hclk:
704 clk_disable(vop->hclk); 494 clk_disable(vop->hclk);
705} 495}
706 496
707static void vop_disable(struct drm_crtc *crtc) 497static void vop_crtc_disable(struct drm_crtc *crtc)
708{ 498{
709 struct vop *vop = to_vop(crtc); 499 struct vop *vop = to_vop(crtc);
710 500
711 if (!vop->is_enabled) 501 if (!vop->is_enabled)
712 return; 502 return;
713 503
714 drm_vblank_off(crtc->dev, vop->pipe); 504 drm_crtc_vblank_off(crtc);
715 505
716 /* 506 /*
717 * Vop standby will take effect at end of current frame, 507 * Vop standby will take effect at end of current frame,
@@ -748,224 +538,188 @@ static void vop_disable(struct drm_crtc *crtc)
748 pm_runtime_put(vop->dev); 538 pm_runtime_put(vop->dev);
749} 539}
750 540
751/* 541static void vop_plane_destroy(struct drm_plane *plane)
752 * Caller must hold vsync_mutex.
753 */
754static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
755{
756 struct vop_win_state *last;
757 struct vop_win_state *active = vop_win->active;
758
759 if (list_empty(&vop_win->pending))
760 return active ? active->fb : NULL;
761
762 last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
763 return last ? last->fb : NULL;
764}
765
766/*
767 * Caller must hold vsync_mutex.
768 */
769static int vop_win_queue_fb(struct vop_win *vop_win,
770 struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
771 struct drm_pending_vblank_event *event)
772{ 542{
773 struct vop_win_state *state; 543 drm_plane_cleanup(plane);
774
775 state = kzalloc(sizeof(*state), GFP_KERNEL);
776 if (!state)
777 return -ENOMEM;
778
779 state->fb = fb;
780 state->yrgb_mst = yrgb_mst;
781 state->event = event;
782
783 list_add_tail(&state->head, &vop_win->pending);
784
785 return 0;
786} 544}
787 545
788static int vop_update_plane_event(struct drm_plane *plane, 546static int vop_plane_atomic_check(struct drm_plane *plane,
789 struct drm_crtc *crtc, 547 struct drm_plane_state *state)
790 struct drm_framebuffer *fb, int crtc_x,
791 int crtc_y, unsigned int crtc_w,
792 unsigned int crtc_h, uint32_t src_x,
793 uint32_t src_y, uint32_t src_w,
794 uint32_t src_h,
795 struct drm_pending_vblank_event *event)
796{ 548{
549 struct drm_crtc *crtc = state->crtc;
550 struct drm_framebuffer *fb = state->fb;
797 struct vop_win *vop_win = to_vop_win(plane); 551 struct vop_win *vop_win = to_vop_win(plane);
552 struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
798 const struct vop_win_data *win = vop_win->data; 553 const struct vop_win_data *win = vop_win->data;
799 struct vop *vop = to_vop(crtc);
800 struct drm_gem_object *obj;
801 struct rockchip_gem_object *rk_obj;
802 struct drm_gem_object *uv_obj;
803 struct rockchip_gem_object *rk_uv_obj;
804 unsigned long offset;
805 unsigned int actual_w;
806 unsigned int actual_h;
807 unsigned int dsp_stx;
808 unsigned int dsp_sty;
809 unsigned int y_vir_stride;
810 unsigned int uv_vir_stride = 0;
811 dma_addr_t yrgb_mst;
812 dma_addr_t uv_mst = 0;
813 enum vop_data_format format;
814 uint32_t val;
815 bool is_alpha;
816 bool rb_swap;
817 bool is_yuv;
818 bool visible; 554 bool visible;
819 int ret; 555 int ret;
820 struct drm_rect dest = { 556 struct drm_rect *dest = &vop_plane_state->dest;
821 .x1 = crtc_x, 557 struct drm_rect *src = &vop_plane_state->src;
822 .y1 = crtc_y, 558 struct drm_rect clip;
823 .x2 = crtc_x + crtc_w,
824 .y2 = crtc_y + crtc_h,
825 };
826 struct drm_rect src = {
827 /* 16.16 fixed point */
828 .x1 = src_x,
829 .y1 = src_y,
830 .x2 = src_x + src_w,
831 .y2 = src_y + src_h,
832 };
833 const struct drm_rect clip = {
834 .x2 = crtc->mode.hdisplay,
835 .y2 = crtc->mode.vdisplay,
836 };
837 bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
838 int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : 559 int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
839 DRM_PLANE_HELPER_NO_SCALING; 560 DRM_PLANE_HELPER_NO_SCALING;
840 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 561 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
841 DRM_PLANE_HELPER_NO_SCALING; 562 DRM_PLANE_HELPER_NO_SCALING;
842 563
843 ret = drm_plane_helper_check_update(plane, crtc, fb, 564 crtc = crtc ? crtc : plane->state->crtc;
844 &src, &dest, &clip, 565 /*
566 * Both crtc or plane->state->crtc can be null.
567 */
568 if (!crtc || !fb)
569 goto out_disable;
570 src->x1 = state->src_x;
571 src->y1 = state->src_y;
572 src->x2 = state->src_x + state->src_w;
573 src->y2 = state->src_y + state->src_h;
574 dest->x1 = state->crtc_x;
575 dest->y1 = state->crtc_y;
576 dest->x2 = state->crtc_x + state->crtc_w;
577 dest->y2 = state->crtc_y + state->crtc_h;
578
579 clip.x1 = 0;
580 clip.y1 = 0;
581 clip.x2 = crtc->mode.hdisplay;
582 clip.y2 = crtc->mode.vdisplay;
583
584 ret = drm_plane_helper_check_update(plane, crtc, state->fb,
585 src, dest, &clip,
845 min_scale, 586 min_scale,
846 max_scale, 587 max_scale,
847 can_position, false, &visible); 588 true, true, &visible);
848 if (ret) 589 if (ret)
849 return ret; 590 return ret;
850 591
851 if (!visible) 592 if (!visible)
852 return 0; 593 goto out_disable;
853 594
854 is_alpha = is_alpha_support(fb->pixel_format); 595 vop_plane_state->format = vop_convert_format(fb->pixel_format);
855 rb_swap = has_rb_swapped(fb->pixel_format); 596 if (vop_plane_state->format < 0)
856 is_yuv = is_yuv_support(fb->pixel_format); 597 return vop_plane_state->format;
857
858 format = vop_convert_format(fb->pixel_format);
859 if (format < 0)
860 return format;
861 598
862 obj = rockchip_fb_get_gem_obj(fb, 0); 599 /*
863 if (!obj) { 600 * Src.x1 can be odd when do clip, but yuv plane start point
864 DRM_ERROR("fail to get rockchip gem object from framebuffer\n"); 601 * need align with 2 pixel.
602 */
603 if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2))
865 return -EINVAL; 604 return -EINVAL;
866 }
867 605
868 rk_obj = to_rockchip_obj(obj); 606 vop_plane_state->enable = true;
869 607
870 if (is_yuv) { 608 return 0;
871 /*
872 * Src.x1 can be odd when do clip, but yuv plane start point
873 * need align with 2 pixel.
874 */
875 val = (src.x1 >> 16) % 2;
876 src.x1 += val << 16;
877 src.x2 += val << 16;
878 }
879 609
880 actual_w = (src.x2 - src.x1) >> 16; 610out_disable:
881 actual_h = (src.y2 - src.y1) >> 16; 611 vop_plane_state->enable = false;
612 return 0;
613}
882 614
883 dsp_stx = dest.x1 + crtc->mode.htotal - crtc->mode.hsync_start; 615static void vop_plane_atomic_disable(struct drm_plane *plane,
884 dsp_sty = dest.y1 + crtc->mode.vtotal - crtc->mode.vsync_start; 616 struct drm_plane_state *old_state)
617{
618 struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
619 struct vop_win *vop_win = to_vop_win(plane);
620 const struct vop_win_data *win = vop_win->data;
621 struct vop *vop = to_vop(old_state->crtc);
885 622
886 offset = (src.x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0); 623 if (!old_state->crtc)
887 offset += (src.y1 >> 16) * fb->pitches[0]; 624 return;
888 625
889 yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0]; 626 spin_lock(&vop->reg_lock);
890 y_vir_stride = fb->pitches[0] >> 2;
891 627
892 if (is_yuv) { 628 VOP_WIN_SET(vop, win, enable, 0);
893 int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
894 int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
895 int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
896 629
897 uv_obj = rockchip_fb_get_gem_obj(fb, 1); 630 spin_unlock(&vop->reg_lock);
898 if (!uv_obj) {
899 DRM_ERROR("fail to get uv object from framebuffer\n");
900 return -EINVAL;
901 }
902 rk_uv_obj = to_rockchip_obj(uv_obj);
903 uv_vir_stride = fb->pitches[1] >> 2;
904 631
905 offset = (src.x1 >> 16) * bpp / hsub; 632 vop_plane_state->enable = false;
906 offset += (src.y1 >> 16) * fb->pitches[1] / vsub; 633}
907 634
908 uv_mst = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 635static void vop_plane_atomic_update(struct drm_plane *plane,
909 } 636 struct drm_plane_state *old_state)
637{
638 struct drm_plane_state *state = plane->state;
639 struct drm_crtc *crtc = state->crtc;
640 struct vop_win *vop_win = to_vop_win(plane);
641 struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
642 const struct vop_win_data *win = vop_win->data;
643 struct vop *vop = to_vop(state->crtc);
644 struct drm_framebuffer *fb = state->fb;
645 unsigned int actual_w, actual_h;
646 unsigned int dsp_stx, dsp_sty;
647 uint32_t act_info, dsp_info, dsp_st;
648 struct drm_rect *src = &vop_plane_state->src;
649 struct drm_rect *dest = &vop_plane_state->dest;
650 struct drm_gem_object *obj, *uv_obj;
651 struct rockchip_gem_object *rk_obj, *rk_uv_obj;
652 unsigned long offset;
653 dma_addr_t dma_addr;
654 uint32_t val;
655 bool rb_swap;
910 656
911 /* 657 /*
912 * If this plane update changes the plane's framebuffer, (or more 658 * can't update plane when vop is disabled.
913 * precisely, if this update has a different framebuffer than the last
914 * update), enqueue it so we can track when it completes.
915 *
916 * Only when we discover that this update has completed, can we
917 * unreference any previous framebuffers.
918 */ 659 */
919 mutex_lock(&vop->vsync_mutex); 660 if (!crtc)
920 if (fb != vop_win_last_pending_fb(vop_win)) { 661 return;
921 ret = drm_vblank_get(plane->dev, vop->pipe);
922 if (ret) {
923 DRM_ERROR("failed to get vblank, %d\n", ret);
924 mutex_unlock(&vop->vsync_mutex);
925 return ret;
926 }
927
928 drm_framebuffer_reference(fb);
929 662
930 ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event); 663 if (WARN_ON(!vop->is_enabled))
931 if (ret) { 664 return;
932 drm_vblank_put(plane->dev, vop->pipe);
933 mutex_unlock(&vop->vsync_mutex);
934 return ret;
935 }
936 665
937 vop->vsync_work_pending = true; 666 if (!vop_plane_state->enable) {
667 vop_plane_atomic_disable(plane, old_state);
668 return;
938 } 669 }
939 mutex_unlock(&vop->vsync_mutex); 670
671 obj = rockchip_fb_get_gem_obj(fb, 0);
672 rk_obj = to_rockchip_obj(obj);
673
674 actual_w = drm_rect_width(src) >> 16;
675 actual_h = drm_rect_height(src) >> 16;
676 act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);
677
678 dsp_info = (drm_rect_height(dest) - 1) << 16;
679 dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;
680
681 dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
682 dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
683 dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);
684
685 offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
686 offset += (src->y1 >> 16) * fb->pitches[0];
687 vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
940 688
941 spin_lock(&vop->reg_lock); 689 spin_lock(&vop->reg_lock);
942 690
943 VOP_WIN_SET(vop, win, format, format); 691 VOP_WIN_SET(vop, win, format, vop_plane_state->format);
944 VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride); 692 VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
945 VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst); 693 VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
946 if (is_yuv) { 694 if (is_yuv_support(fb->pixel_format)) {
947 VOP_WIN_SET(vop, win, uv_vir, uv_vir_stride); 695 int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
948 VOP_WIN_SET(vop, win, uv_mst, uv_mst); 696 int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
697 int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
698
699 uv_obj = rockchip_fb_get_gem_obj(fb, 1);
700 rk_uv_obj = to_rockchip_obj(uv_obj);
701
702 offset = (src->x1 >> 16) * bpp / hsub;
703 offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
704
705 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
706 VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
707 VOP_WIN_SET(vop, win, uv_mst, dma_addr);
949 } 708 }
950 709
951 if (win->phy->scl) 710 if (win->phy->scl)
952 scl_vop_cal_scl_fac(vop, win, actual_w, actual_h, 711 scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
953 dest.x2 - dest.x1, dest.y2 - dest.y1, 712 drm_rect_width(dest), drm_rect_height(dest),
954 fb->pixel_format); 713 fb->pixel_format);
955 714
956 val = (actual_h - 1) << 16; 715 VOP_WIN_SET(vop, win, act_info, act_info);
957 val |= (actual_w - 1) & 0xffff; 716 VOP_WIN_SET(vop, win, dsp_info, dsp_info);
958 VOP_WIN_SET(vop, win, act_info, val); 717 VOP_WIN_SET(vop, win, dsp_st, dsp_st);
959 718
960 val = (dest.y2 - dest.y1 - 1) << 16; 719 rb_swap = has_rb_swapped(fb->pixel_format);
961 val |= (dest.x2 - dest.x1 - 1) & 0xffff;
962 VOP_WIN_SET(vop, win, dsp_info, val);
963 val = dsp_sty << 16;
964 val |= dsp_stx & 0xffff;
965 VOP_WIN_SET(vop, win, dsp_st, val);
966 VOP_WIN_SET(vop, win, rb_swap, rb_swap); 720 VOP_WIN_SET(vop, win, rb_swap, rb_swap);
967 721
968 if (is_alpha) { 722 if (is_alpha_support(fb->pixel_format)) {
969 VOP_WIN_SET(vop, win, dst_alpha_ctl, 723 VOP_WIN_SET(vop, win, dst_alpha_ctl,
970 DST_FACTOR_M0(ALPHA_SRC_INVERSE)); 724 DST_FACTOR_M0(ALPHA_SRC_INVERSE));
971 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) | 725 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
@@ -979,86 +733,70 @@ static int vop_update_plane_event(struct drm_plane *plane,
979 } 733 }
980 734
981 VOP_WIN_SET(vop, win, enable, 1); 735 VOP_WIN_SET(vop, win, enable, 1);
982
983 vop_cfg_done(vop);
984 spin_unlock(&vop->reg_lock); 736 spin_unlock(&vop->reg_lock);
985
986 return 0;
987} 737}
988 738
989static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 739static const struct drm_plane_helper_funcs plane_helper_funcs = {
990 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 740 .atomic_check = vop_plane_atomic_check,
991 unsigned int crtc_w, unsigned int crtc_h, 741 .atomic_update = vop_plane_atomic_update,
992 uint32_t src_x, uint32_t src_y, uint32_t src_w, 742 .atomic_disable = vop_plane_atomic_disable,
993 uint32_t src_h) 743};
994{
995 return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
996 crtc_h, src_x, src_y, src_w, src_h,
997 NULL);
998}
999 744
1000static int vop_update_primary_plane(struct drm_crtc *crtc, 745void vop_atomic_plane_reset(struct drm_plane *plane)
1001 struct drm_pending_vblank_event *event)
1002{ 746{
1003 unsigned int crtc_w, crtc_h; 747 struct vop_plane_state *vop_plane_state =
748 to_vop_plane_state(plane->state);
1004 749
1005 crtc_w = crtc->primary->fb->width - crtc->x; 750 if (plane->state && plane->state->fb)
1006 crtc_h = crtc->primary->fb->height - crtc->y; 751 drm_framebuffer_unreference(plane->state->fb);
1007 752
1008 return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb, 753 kfree(vop_plane_state);
1009 0, 0, crtc_w, crtc_h, crtc->x << 16, 754 vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
1010 crtc->y << 16, crtc_w << 16, 755 if (!vop_plane_state)
1011 crtc_h << 16, event); 756 return;
757
758 plane->state = &vop_plane_state->base;
759 plane->state->plane = plane;
1012} 760}
1013 761
1014static int vop_disable_plane(struct drm_plane *plane) 762struct drm_plane_state *
763vop_atomic_plane_duplicate_state(struct drm_plane *plane)
1015{ 764{
1016 struct vop_win *vop_win = to_vop_win(plane); 765 struct vop_plane_state *old_vop_plane_state;
1017 const struct vop_win_data *win = vop_win->data; 766 struct vop_plane_state *vop_plane_state;
1018 struct vop *vop;
1019 int ret;
1020
1021 if (!plane->crtc)
1022 return 0;
1023
1024 vop = to_vop(plane->crtc);
1025 767
1026 ret = drm_vblank_get(plane->dev, vop->pipe); 768 if (WARN_ON(!plane->state))
1027 if (ret) { 769 return NULL;
1028 DRM_ERROR("failed to get vblank, %d\n", ret);
1029 return ret;
1030 }
1031
1032 mutex_lock(&vop->vsync_mutex);
1033
1034 ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
1035 if (ret) {
1036 drm_vblank_put(plane->dev, vop->pipe);
1037 mutex_unlock(&vop->vsync_mutex);
1038 return ret;
1039 }
1040 770
1041 vop->vsync_work_pending = true; 771 old_vop_plane_state = to_vop_plane_state(plane->state);
1042 mutex_unlock(&vop->vsync_mutex); 772 vop_plane_state = kmemdup(old_vop_plane_state,
773 sizeof(*vop_plane_state), GFP_KERNEL);
774 if (!vop_plane_state)
775 return NULL;
1043 776
1044 spin_lock(&vop->reg_lock); 777 __drm_atomic_helper_plane_duplicate_state(plane,
1045 VOP_WIN_SET(vop, win, enable, 0); 778 &vop_plane_state->base);
1046 vop_cfg_done(vop);
1047 spin_unlock(&vop->reg_lock);
1048 779
1049 return 0; 780 return &vop_plane_state->base;
1050} 781}
1051 782
1052static void vop_plane_destroy(struct drm_plane *plane) 783static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
784 struct drm_plane_state *state)
1053{ 785{
1054 vop_disable_plane(plane); 786 struct vop_plane_state *vop_state = to_vop_plane_state(state);
1055 drm_plane_cleanup(plane); 787
788 __drm_atomic_helper_plane_destroy_state(plane, state);
789
790 kfree(vop_state);
1056} 791}
1057 792
1058static const struct drm_plane_funcs vop_plane_funcs = { 793static const struct drm_plane_funcs vop_plane_funcs = {
1059 .update_plane = vop_update_plane, 794 .update_plane = drm_atomic_helper_update_plane,
1060 .disable_plane = vop_disable_plane, 795 .disable_plane = drm_atomic_helper_disable_plane,
1061 .destroy = vop_plane_destroy, 796 .destroy = vop_plane_destroy,
797 .reset = vop_atomic_plane_reset,
798 .atomic_duplicate_state = vop_atomic_plane_duplicate_state,
799 .atomic_destroy_state = vop_atomic_plane_destroy_state,
1062}; 800};
1063 801
1064int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, 802int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
@@ -1067,8 +805,27 @@ int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
1067{ 805{
1068 struct vop *vop = to_vop(crtc); 806 struct vop *vop = to_vop(crtc);
1069 807
1070 vop->connector_type = connector_type; 808 if (WARN_ON(!vop->is_enabled))
1071 vop->connector_out_mode = out_mode; 809 return -EINVAL;
810
811 switch (connector_type) {
812 case DRM_MODE_CONNECTOR_LVDS:
813 VOP_CTRL_SET(vop, rgb_en, 1);
814 break;
815 case DRM_MODE_CONNECTOR_eDP:
816 VOP_CTRL_SET(vop, edp_en, 1);
817 break;
818 case DRM_MODE_CONNECTOR_HDMIA:
819 VOP_CTRL_SET(vop, hdmi_en, 1);
820 break;
821 case DRM_MODE_CONNECTOR_DSI:
822 VOP_CTRL_SET(vop, mipi_en, 1);
823 break;
824 default:
825 DRM_ERROR("unsupport connector_type[%d]\n", connector_type);
826 return -EINVAL;
827 };
828 VOP_CTRL_SET(vop, out_mode, out_mode);
1072 829
1073 return 0; 830 return 0;
1074} 831}
@@ -1079,12 +836,12 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
1079 struct vop *vop = to_vop(crtc); 836 struct vop *vop = to_vop(crtc);
1080 unsigned long flags; 837 unsigned long flags;
1081 838
1082 if (!vop->is_enabled) 839 if (WARN_ON(!vop->is_enabled))
1083 return -EPERM; 840 return -EPERM;
1084 841
1085 spin_lock_irqsave(&vop->irq_lock, flags); 842 spin_lock_irqsave(&vop->irq_lock, flags);
1086 843
1087 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1)); 844 VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);
1088 845
1089 spin_unlock_irqrestore(&vop->irq_lock, flags); 846 spin_unlock_irqrestore(&vop->irq_lock, flags);
1090 847
@@ -1096,76 +853,49 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
1096 struct vop *vop = to_vop(crtc); 853 struct vop *vop = to_vop(crtc);
1097 unsigned long flags; 854 unsigned long flags;
1098 855
1099 if (!vop->is_enabled) 856 if (WARN_ON(!vop->is_enabled))
1100 return; 857 return;
1101 858
1102 spin_lock_irqsave(&vop->irq_lock, flags); 859 spin_lock_irqsave(&vop->irq_lock, flags);
1103 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0)); 860
861 VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);
862
1104 spin_unlock_irqrestore(&vop->irq_lock, flags); 863 spin_unlock_irqrestore(&vop->irq_lock, flags);
1105} 864}
1106 865
1107static const struct rockchip_crtc_funcs private_crtc_funcs = { 866static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
1108 .enable_vblank = vop_crtc_enable_vblank,
1109 .disable_vblank = vop_crtc_disable_vblank,
1110};
1111
1112static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
1113{ 867{
1114 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); 868 struct vop *vop = to_vop(crtc);
1115 869
1116 switch (mode) { 870 reinit_completion(&vop->wait_update_complete);
1117 case DRM_MODE_DPMS_ON: 871 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
1118 vop_enable(crtc);
1119 break;
1120 case DRM_MODE_DPMS_STANDBY:
1121 case DRM_MODE_DPMS_SUSPEND:
1122 case DRM_MODE_DPMS_OFF:
1123 vop_disable(crtc);
1124 break;
1125 default:
1126 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
1127 break;
1128 }
1129} 872}
1130 873
1131static void vop_crtc_prepare(struct drm_crtc *crtc) 874static const struct rockchip_crtc_funcs private_crtc_funcs = {
1132{ 875 .enable_vblank = vop_crtc_enable_vblank,
1133 vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 876 .disable_vblank = vop_crtc_disable_vblank,
1134} 877 .wait_for_update = vop_crtc_wait_for_update,
878};
1135 879
1136static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, 880static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
1137 const struct drm_display_mode *mode, 881 const struct drm_display_mode *mode,
1138 struct drm_display_mode *adjusted_mode) 882 struct drm_display_mode *adjusted_mode)
1139{ 883{
884 struct vop *vop = to_vop(crtc);
885
1140 if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0) 886 if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
1141 return false; 887 return false;
1142 888
1143 return true; 889 adjusted_mode->clock =
1144} 890 clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
1145
1146static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
1147 struct drm_framebuffer *old_fb)
1148{
1149 int ret;
1150
1151 crtc->x = x;
1152 crtc->y = y;
1153 891
1154 ret = vop_update_primary_plane(crtc, NULL); 892 return true;
1155 if (ret < 0) {
1156 DRM_ERROR("fail to update plane\n");
1157 return ret;
1158 }
1159
1160 return 0;
1161} 893}
1162 894
1163static int vop_crtc_mode_set(struct drm_crtc *crtc, 895static void vop_crtc_enable(struct drm_crtc *crtc)
1164 struct drm_display_mode *mode,
1165 struct drm_display_mode *adjusted_mode,
1166 int x, int y, struct drm_framebuffer *fb)
1167{ 896{
1168 struct vop *vop = to_vop(crtc); 897 struct vop *vop = to_vop(crtc);
898 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
1169 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; 899 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
1170 u16 hdisplay = adjusted_mode->hdisplay; 900 u16 hdisplay = adjusted_mode->hdisplay;
1171 u16 htotal = adjusted_mode->htotal; 901 u16 htotal = adjusted_mode->htotal;
@@ -1176,32 +906,44 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
1176 u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; 906 u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
1177 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; 907 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
1178 u16 vact_end = vact_st + vdisplay; 908 u16 vact_end = vact_st + vdisplay;
1179 int ret, ret_clk;
1180 uint32_t val; 909 uint32_t val;
1181 910
911 vop_enable(crtc);
1182 /* 912 /*
1183 * disable dclk to stop frame scan, so that we can safe config mode and 913 * If dclk rate is zero, mean that scanout is stop,
1184 * enable iommu. 914 * we don't need wait any more.
1185 */ 915 */
1186 clk_disable(vop->dclk); 916 if (clk_get_rate(vop->dclk)) {
917 /*
918 * Rk3288 vop timing register is immediately, when configure
919 * display timing on display time, may cause tearing.
920 *
921 * Vop standby will take effect at end of current frame,
922 * if dsp hold valid irq happen, it means standby complete.
923 *
924 * mode set:
925 * standby and wait complete --> |----
926 * | display time
927 * |----
928 * |---> dsp hold irq
929 * configure display timing --> |
930 * standby exit |
931 * | new frame start.
932 */
1187 933
1188 switch (vop->connector_type) { 934 reinit_completion(&vop->dsp_hold_completion);
1189 case DRM_MODE_CONNECTOR_LVDS: 935 vop_dsp_hold_valid_irq_enable(vop);
1190 VOP_CTRL_SET(vop, rgb_en, 1); 936
1191 break; 937 spin_lock(&vop->reg_lock);
1192 case DRM_MODE_CONNECTOR_eDP: 938
1193 VOP_CTRL_SET(vop, edp_en, 1); 939 VOP_CTRL_SET(vop, standby, 1);
1194 break; 940
1195 case DRM_MODE_CONNECTOR_HDMIA: 941 spin_unlock(&vop->reg_lock);
1196 VOP_CTRL_SET(vop, hdmi_en, 1); 942
1197 break; 943 wait_for_completion(&vop->dsp_hold_completion);
1198 default: 944
1199 DRM_ERROR("unsupport connector_type[%d]\n", 945 vop_dsp_hold_valid_irq_disable(vop);
1200 vop->connector_type); 946 }
1201 ret = -EINVAL;
1202 goto out;
1203 };
1204 VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
1205 947
1206 val = 0x8; 948 val = 0x8;
1207 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; 949 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
@@ -1220,211 +962,119 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
1220 VOP_CTRL_SET(vop, vact_st_end, val); 962 VOP_CTRL_SET(vop, vact_st_end, val);
1221 VOP_CTRL_SET(vop, vpost_st_end, val); 963 VOP_CTRL_SET(vop, vpost_st_end, val);
1222 964
1223 ret = vop_crtc_mode_set_base(crtc, x, y, fb);
1224 if (ret)
1225 goto out;
1226
1227 /*
1228 * reset dclk, take all mode config affect, so the clk would run in
1229 * correct frame.
1230 */
1231 reset_control_assert(vop->dclk_rst);
1232 usleep_range(10, 20);
1233 reset_control_deassert(vop->dclk_rst);
1234
1235 clk_set_rate(vop->dclk, adjusted_mode->clock * 1000); 965 clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
1236out:
1237 ret_clk = clk_enable(vop->dclk);
1238 if (ret_clk < 0) {
1239 dev_err(vop->dev, "failed to enable dclk - %d\n", ret_clk);
1240 return ret_clk;
1241 }
1242 966
1243 return ret; 967 VOP_CTRL_SET(vop, standby, 0);
1244}
1245
1246static void vop_crtc_commit(struct drm_crtc *crtc)
1247{
1248} 968}
1249 969
1250static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { 970static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
1251 .dpms = vop_crtc_dpms, 971 struct drm_crtc_state *old_crtc_state)
1252 .prepare = vop_crtc_prepare,
1253 .mode_fixup = vop_crtc_mode_fixup,
1254 .mode_set = vop_crtc_mode_set,
1255 .mode_set_base = vop_crtc_mode_set_base,
1256 .commit = vop_crtc_commit,
1257};
1258
1259static int vop_crtc_page_flip(struct drm_crtc *crtc,
1260 struct drm_framebuffer *fb,
1261 struct drm_pending_vblank_event *event,
1262 uint32_t page_flip_flags)
1263{ 972{
1264 struct vop *vop = to_vop(crtc); 973 struct vop *vop = to_vop(crtc);
1265 struct drm_framebuffer *old_fb = crtc->primary->fb;
1266 int ret;
1267 974
1268 /* when the page flip is requested, crtc should be on */ 975 if (WARN_ON(!vop->is_enabled))
1269 if (!vop->is_enabled) { 976 return;
1270 DRM_DEBUG("page flip request rejected because crtc is off.\n");
1271 return 0;
1272 }
1273 977
1274 crtc->primary->fb = fb; 978 spin_lock(&vop->reg_lock);
1275 979
1276 ret = vop_update_primary_plane(crtc, event); 980 vop_cfg_done(vop);
1277 if (ret)
1278 crtc->primary->fb = old_fb;
1279 981
1280 return ret; 982 spin_unlock(&vop->reg_lock);
1281} 983}
1282 984
1283static void vop_win_state_complete(struct vop_win *vop_win, 985static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
1284 struct vop_win_state *state) 986 struct drm_crtc_state *old_crtc_state)
1285{ 987{
1286 struct vop *vop = vop_win->vop; 988 struct vop *vop = to_vop(crtc);
1287 struct drm_crtc *crtc = &vop->crtc;
1288 struct drm_device *drm = crtc->dev;
1289 unsigned long flags;
1290 989
1291 if (state->event) { 990 if (crtc->state->event) {
1292 spin_lock_irqsave(&drm->event_lock, flags); 991 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
1293 drm_crtc_send_vblank_event(crtc, state->event);
1294 spin_unlock_irqrestore(&drm->event_lock, flags);
1295 }
1296 992
1297 list_del(&state->head); 993 vop->event = crtc->state->event;
1298 drm_vblank_put(crtc->dev, vop->pipe); 994 crtc->state->event = NULL;
995 }
1299} 996}
1300 997
998static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
999 .enable = vop_crtc_enable,
1000 .disable = vop_crtc_disable,
1001 .mode_fixup = vop_crtc_mode_fixup,
1002 .atomic_flush = vop_crtc_atomic_flush,
1003 .atomic_begin = vop_crtc_atomic_begin,
1004};
1005
1301static void vop_crtc_destroy(struct drm_crtc *crtc) 1006static void vop_crtc_destroy(struct drm_crtc *crtc)
1302{ 1007{
1303 drm_crtc_cleanup(crtc); 1008 drm_crtc_cleanup(crtc);
1304} 1009}
1305 1010
1306static const struct drm_crtc_funcs vop_crtc_funcs = { 1011static const struct drm_crtc_funcs vop_crtc_funcs = {
1307 .set_config = drm_crtc_helper_set_config, 1012 .set_config = drm_atomic_helper_set_config,
1308 .page_flip = vop_crtc_page_flip, 1013 .page_flip = drm_atomic_helper_page_flip,
1309 .destroy = vop_crtc_destroy, 1014 .destroy = vop_crtc_destroy,
1015 .reset = drm_atomic_helper_crtc_reset,
1016 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
1017 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
1310}; 1018};
1311 1019
1312static bool vop_win_state_is_active(struct vop_win *vop_win, 1020static bool vop_win_pending_is_complete(struct vop_win *vop_win)
1313 struct vop_win_state *state)
1314{ 1021{
1315 bool active = false; 1022 struct drm_plane *plane = &vop_win->base;
1316 1023 struct vop_plane_state *state = to_vop_plane_state(plane->state);
1317 if (state->fb) { 1024 dma_addr_t yrgb_mst;
1318 dma_addr_t yrgb_mst;
1319
1320 /* check yrgb_mst to tell if pending_fb is now front */
1321 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
1322
1323 active = (yrgb_mst == state->yrgb_mst);
1324 } else {
1325 bool enabled;
1326
1327 /* if enable bit is clear, plane is now disabled */
1328 enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
1329
1330 active = (enabled == 0);
1331 }
1332 1025
1333 return active; 1026 if (!state->enable)
1334} 1027 return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
1335 1028
1336static void vop_win_state_destroy(struct vop_win_state *state) 1029 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
1337{
1338 struct drm_framebuffer *fb = state->fb;
1339 1030
1340 if (fb) 1031 return yrgb_mst == state->yrgb_mst;
1341 drm_framebuffer_unreference(fb);
1342
1343 kfree(state);
1344} 1032}
1345 1033
1346static void vop_win_update_state(struct vop_win *vop_win) 1034static void vop_handle_vblank(struct vop *vop)
1347{ 1035{
1348 struct vop_win_state *state, *n, *new_active = NULL; 1036 struct drm_device *drm = vop->drm_dev;
1349 1037 struct drm_crtc *crtc = &vop->crtc;
1350 /* Check if any pending states are now active */ 1038 unsigned long flags;
1351 list_for_each_entry(state, &vop_win->pending, head) 1039 int i;
1352 if (vop_win_state_is_active(vop_win, state)) {
1353 new_active = state;
1354 break;
1355 }
1356
1357 if (!new_active)
1358 return;
1359 1040
1360 /* 1041 for (i = 0; i < vop->data->win_size; i++) {
1361 * Destroy any 'skipped' pending states - states that were queued 1042 if (!vop_win_pending_is_complete(&vop->win[i]))
1362 * before the newly active state. 1043 return;
1363 */
1364 list_for_each_entry_safe(state, n, &vop_win->pending, head) {
1365 if (state == new_active)
1366 break;
1367 vop_win_state_complete(vop_win, state);
1368 vop_win_state_destroy(state);
1369 } 1044 }
1370 1045
1371 vop_win_state_complete(vop_win, new_active); 1046 if (vop->event) {
1372 1047 spin_lock_irqsave(&drm->event_lock, flags);
1373 if (vop_win->active)
1374 vop_win_state_destroy(vop_win->active);
1375 vop_win->active = new_active;
1376}
1377
1378static bool vop_win_has_pending_state(struct vop_win *vop_win)
1379{
1380 return !list_empty(&vop_win->pending);
1381}
1382
1383static irqreturn_t vop_isr_thread(int irq, void *data)
1384{
1385 struct vop *vop = data;
1386 const struct vop_data *vop_data = vop->data;
1387 unsigned int i;
1388
1389 mutex_lock(&vop->vsync_mutex);
1390
1391 if (!vop->vsync_work_pending)
1392 goto done;
1393 1048
1394 vop->vsync_work_pending = false; 1049 drm_crtc_send_vblank_event(crtc, vop->event);
1050 drm_crtc_vblank_put(crtc);
1051 vop->event = NULL;
1395 1052
1396 for (i = 0; i < vop_data->win_size; i++) { 1053 spin_unlock_irqrestore(&drm->event_lock, flags);
1397 struct vop_win *vop_win = &vop->win[i];
1398
1399 vop_win_update_state(vop_win);
1400 if (vop_win_has_pending_state(vop_win))
1401 vop->vsync_work_pending = true;
1402 } 1054 }
1403 1055 if (!completion_done(&vop->wait_update_complete))
1404done: 1056 complete(&vop->wait_update_complete);
1405 mutex_unlock(&vop->vsync_mutex);
1406
1407 return IRQ_HANDLED;
1408} 1057}
1409 1058
1410static irqreturn_t vop_isr(int irq, void *data) 1059static irqreturn_t vop_isr(int irq, void *data)
1411{ 1060{
1412 struct vop *vop = data; 1061 struct vop *vop = data;
1413 uint32_t intr0_reg, active_irqs; 1062 struct drm_crtc *crtc = &vop->crtc;
1063 uint32_t active_irqs;
1414 unsigned long flags; 1064 unsigned long flags;
1415 int ret = IRQ_NONE; 1065 int ret = IRQ_NONE;
1416 1066
1417 /* 1067 /*
1418 * INTR_CTRL0 register has interrupt status, enable and clear bits, we 1068 * interrupt register has interrupt status, enable and clear bits, we
1419 * must hold irq_lock to avoid a race with enable/disable_vblank(). 1069 * must hold irq_lock to avoid a race with enable/disable_vblank().
1420 */ 1070 */
1421 spin_lock_irqsave(&vop->irq_lock, flags); 1071 spin_lock_irqsave(&vop->irq_lock, flags);
1422 intr0_reg = vop_readl(vop, INTR_CTRL0); 1072
1423 active_irqs = intr0_reg & INTR_MASK; 1073 active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
1424 /* Clear all active interrupt sources */ 1074 /* Clear all active interrupt sources */
1425 if (active_irqs) 1075 if (active_irqs)
1426 vop_writel(vop, INTR_CTRL0, 1076 VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);
1427 intr0_reg | (active_irqs << INTR_CLR_SHIFT)); 1077
1428 spin_unlock_irqrestore(&vop->irq_lock, flags); 1078 spin_unlock_irqrestore(&vop->irq_lock, flags);
1429 1079
1430 /* This is expected for vop iommu irqs, since the irq is shared */ 1080 /* This is expected for vop iommu irqs, since the irq is shared */
@@ -1438,9 +1088,10 @@ static irqreturn_t vop_isr(int irq, void *data)
1438 } 1088 }
1439 1089
1440 if (active_irqs & FS_INTR) { 1090 if (active_irqs & FS_INTR) {
1441 drm_handle_vblank(vop->drm_dev, vop->pipe); 1091 drm_crtc_handle_vblank(crtc);
1092 vop_handle_vblank(vop);
1442 active_irqs &= ~FS_INTR; 1093 active_irqs &= ~FS_INTR;
1443 ret = (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED; 1094 ret = IRQ_HANDLED;
1444 } 1095 }
1445 1096
1446 /* Unhandled irqs are spurious. */ 1097 /* Unhandled irqs are spurious. */
@@ -1478,13 +1129,14 @@ static int vop_create_crtc(struct vop *vop)
1478 0, &vop_plane_funcs, 1129 0, &vop_plane_funcs,
1479 win_data->phy->data_formats, 1130 win_data->phy->data_formats,
1480 win_data->phy->nformats, 1131 win_data->phy->nformats,
1481 win_data->type); 1132 win_data->type, NULL);
1482 if (ret) { 1133 if (ret) {
1483 DRM_ERROR("failed to initialize plane\n"); 1134 DRM_ERROR("failed to initialize plane\n");
1484 goto err_cleanup_planes; 1135 goto err_cleanup_planes;
1485 } 1136 }
1486 1137
1487 plane = &vop_win->base; 1138 plane = &vop_win->base;
1139 drm_plane_helper_add(plane, &plane_helper_funcs);
1488 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 1140 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1489 primary = plane; 1141 primary = plane;
1490 else if (plane->type == DRM_PLANE_TYPE_CURSOR) 1142 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
@@ -1492,7 +1144,7 @@ static int vop_create_crtc(struct vop *vop)
1492 } 1144 }
1493 1145
1494 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 1146 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
1495 &vop_crtc_funcs); 1147 &vop_crtc_funcs, NULL);
1496 if (ret) 1148 if (ret)
1497 return ret; 1149 return ret;
1498 1150
@@ -1515,11 +1167,12 @@ static int vop_create_crtc(struct vop *vop)
1515 &vop_plane_funcs, 1167 &vop_plane_funcs,
1516 win_data->phy->data_formats, 1168 win_data->phy->data_formats,
1517 win_data->phy->nformats, 1169 win_data->phy->nformats,
1518 win_data->type); 1170 win_data->type, NULL);
1519 if (ret) { 1171 if (ret) {
1520 DRM_ERROR("failed to initialize overlay plane\n"); 1172 DRM_ERROR("failed to initialize overlay plane\n");
1521 goto err_cleanup_crtc; 1173 goto err_cleanup_crtc;
1522 } 1174 }
1175 drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
1523 } 1176 }
1524 1177
1525 port = of_get_child_by_name(dev->of_node, "port"); 1178 port = of_get_child_by_name(dev->of_node, "port");
@@ -1530,9 +1183,9 @@ static int vop_create_crtc(struct vop *vop)
1530 } 1183 }
1531 1184
1532 init_completion(&vop->dsp_hold_completion); 1185 init_completion(&vop->dsp_hold_completion);
1186 init_completion(&vop->wait_update_complete);
1533 crtc->port = port; 1187 crtc->port = port;
1534 vop->pipe = drm_crtc_index(crtc); 1188 rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
1535 rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
1536 1189
1537 return 0; 1190 return 0;
1538 1191
@@ -1548,7 +1201,7 @@ static void vop_destroy_crtc(struct vop *vop)
1548{ 1201{
1549 struct drm_crtc *crtc = &vop->crtc; 1202 struct drm_crtc *crtc = &vop->crtc;
1550 1203
1551 rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe); 1204 rockchip_unregister_crtc_funcs(crtc);
1552 of_node_put(crtc->port); 1205 of_node_put(crtc->port);
1553 drm_crtc_cleanup(crtc); 1206 drm_crtc_cleanup(crtc);
1554} 1207}
@@ -1664,14 +1317,12 @@ static void vop_win_init(struct vop *vop)
1664 1317
1665 vop_win->data = win_data; 1318 vop_win->data = win_data;
1666 vop_win->vop = vop; 1319 vop_win->vop = vop;
1667 INIT_LIST_HEAD(&vop_win->pending);
1668 } 1320 }
1669} 1321}
1670 1322
1671static int vop_bind(struct device *dev, struct device *master, void *data) 1323static int vop_bind(struct device *dev, struct device *master, void *data)
1672{ 1324{
1673 struct platform_device *pdev = to_platform_device(dev); 1325 struct platform_device *pdev = to_platform_device(dev);
1674 const struct of_device_id *of_id;
1675 const struct vop_data *vop_data; 1326 const struct vop_data *vop_data;
1676 struct drm_device *drm_dev = data; 1327 struct drm_device *drm_dev = data;
1677 struct vop *vop; 1328 struct vop *vop;
@@ -1679,8 +1330,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1679 size_t alloc_size; 1330 size_t alloc_size;
1680 int ret, irq; 1331 int ret, irq;
1681 1332
1682 of_id = of_match_device(vop_driver_dt_match, dev); 1333 vop_data = of_device_get_match_data(dev);
1683 vop_data = of_id->data;
1684 if (!vop_data) 1334 if (!vop_data)
1685 return -ENODEV; 1335 return -ENODEV;
1686 1336
@@ -1725,8 +1375,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1725 1375
1726 mutex_init(&vop->vsync_mutex); 1376 mutex_init(&vop->vsync_mutex);
1727 1377
1728 ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread, 1378 ret = devm_request_irq(dev, vop->irq, vop_isr,
1729 IRQF_SHARED, dev_name(dev), vop); 1379 IRQF_SHARED, dev_name(dev), vop);
1730 if (ret) 1380 if (ret)
1731 return ret; 1381 return ret;
1732 1382
@@ -1749,42 +1399,8 @@ static void vop_unbind(struct device *dev, struct device *master, void *data)
1749 vop_destroy_crtc(vop); 1399 vop_destroy_crtc(vop);
1750} 1400}
1751 1401
1752static const struct component_ops vop_component_ops = { 1402const struct component_ops vop_component_ops = {
1753 .bind = vop_bind, 1403 .bind = vop_bind,
1754 .unbind = vop_unbind, 1404 .unbind = vop_unbind,
1755}; 1405};
1756 1406EXPORT_SYMBOL_GPL(vop_component_ops);
1757static int vop_probe(struct platform_device *pdev)
1758{
1759 struct device *dev = &pdev->dev;
1760
1761 if (!dev->of_node) {
1762 dev_err(dev, "can't find vop devices\n");
1763 return -ENODEV;
1764 }
1765
1766 return component_add(dev, &vop_component_ops);
1767}
1768
1769static int vop_remove(struct platform_device *pdev)
1770{
1771 component_del(&pdev->dev, &vop_component_ops);
1772
1773 return 0;
1774}
1775
1776struct platform_driver vop_platform_driver = {
1777 .probe = vop_probe,
1778 .remove = vop_remove,
1779 .driver = {
1780 .name = "rockchip-vop",
1781 .owner = THIS_MODULE,
1782 .of_match_table = of_match_ptr(vop_driver_dt_match),
1783 },
1784};
1785
1786module_platform_driver(vop_platform_driver);
1787
1788MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
1789MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
1790MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index a2d4ddb896fa..071ff0be7a95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -15,111 +15,125 @@
15#ifndef _ROCKCHIP_DRM_VOP_H 15#ifndef _ROCKCHIP_DRM_VOP_H
16#define _ROCKCHIP_DRM_VOP_H 16#define _ROCKCHIP_DRM_VOP_H
17 17
18/* register definition */ 18enum vop_data_format {
19#define REG_CFG_DONE 0x0000 19 VOP_FMT_ARGB8888 = 0,
20#define VERSION_INFO 0x0004 20 VOP_FMT_RGB888,
21#define SYS_CTRL 0x0008 21 VOP_FMT_RGB565,
22#define SYS_CTRL1 0x000c 22 VOP_FMT_YUV420SP = 4,
23#define DSP_CTRL0 0x0010 23 VOP_FMT_YUV422SP,
24#define DSP_CTRL1 0x0014 24 VOP_FMT_YUV444SP,
25#define DSP_BG 0x0018 25};
26#define MCU_CTRL 0x001c 26
27#define INTR_CTRL0 0x0020 27struct vop_reg_data {
28#define INTR_CTRL1 0x0024 28 uint32_t offset;
29#define WIN0_CTRL0 0x0030 29 uint32_t value;
30#define WIN0_CTRL1 0x0034 30};
31#define WIN0_COLOR_KEY 0x0038 31
32#define WIN0_VIR 0x003c 32struct vop_reg {
33#define WIN0_YRGB_MST 0x0040 33 uint32_t offset;
34#define WIN0_CBR_MST 0x0044 34 uint32_t shift;
35#define WIN0_ACT_INFO 0x0048 35 uint32_t mask;
36#define WIN0_DSP_INFO 0x004c 36};
37#define WIN0_DSP_ST 0x0050 37
38#define WIN0_SCL_FACTOR_YRGB 0x0054 38struct vop_ctrl {
39#define WIN0_SCL_FACTOR_CBR 0x0058 39 struct vop_reg standby;
40#define WIN0_SCL_OFFSET 0x005c 40 struct vop_reg data_blank;
41#define WIN0_SRC_ALPHA_CTRL 0x0060 41 struct vop_reg gate_en;
42#define WIN0_DST_ALPHA_CTRL 0x0064 42 struct vop_reg mmu_en;
43#define WIN0_FADING_CTRL 0x0068 43 struct vop_reg rgb_en;
44/* win1 register */ 44 struct vop_reg edp_en;
45#define WIN1_CTRL0 0x0070 45 struct vop_reg hdmi_en;
46#define WIN1_CTRL1 0x0074 46 struct vop_reg mipi_en;
47#define WIN1_COLOR_KEY 0x0078 47 struct vop_reg out_mode;
48#define WIN1_VIR 0x007c 48 struct vop_reg dither_down;
49#define WIN1_YRGB_MST 0x0080 49 struct vop_reg dither_up;
50#define WIN1_CBR_MST 0x0084 50 struct vop_reg pin_pol;
51#define WIN1_ACT_INFO 0x0088 51
52#define WIN1_DSP_INFO 0x008c 52 struct vop_reg htotal_pw;
53#define WIN1_DSP_ST 0x0090 53 struct vop_reg hact_st_end;
54#define WIN1_SCL_FACTOR_YRGB 0x0094 54 struct vop_reg vtotal_pw;
55#define WIN1_SCL_FACTOR_CBR 0x0098 55 struct vop_reg vact_st_end;
56#define WIN1_SCL_OFFSET 0x009c 56 struct vop_reg hpost_st_end;
57#define WIN1_SRC_ALPHA_CTRL 0x00a0 57 struct vop_reg vpost_st_end;
58#define WIN1_DST_ALPHA_CTRL 0x00a4 58
59#define WIN1_FADING_CTRL 0x00a8 59 struct vop_reg cfg_done;
60/* win2 register */ 60};
61#define WIN2_CTRL0 0x00b0 61
62#define WIN2_CTRL1 0x00b4 62struct vop_intr {
63#define WIN2_VIR0_1 0x00b8 63 const int *intrs;
64#define WIN2_VIR2_3 0x00bc 64 uint32_t nintrs;
65#define WIN2_MST0 0x00c0 65 struct vop_reg enable;
66#define WIN2_DSP_INFO0 0x00c4 66 struct vop_reg clear;
67#define WIN2_DSP_ST0 0x00c8 67 struct vop_reg status;
68#define WIN2_COLOR_KEY 0x00cc 68};
69#define WIN2_MST1 0x00d0 69
70#define WIN2_DSP_INFO1 0x00d4 70struct vop_scl_extension {
71#define WIN2_DSP_ST1 0x00d8 71 struct vop_reg cbcr_vsd_mode;
72#define WIN2_SRC_ALPHA_CTRL 0x00dc 72 struct vop_reg cbcr_vsu_mode;
73#define WIN2_MST2 0x00e0 73 struct vop_reg cbcr_hsd_mode;
74#define WIN2_DSP_INFO2 0x00e4 74 struct vop_reg cbcr_ver_scl_mode;
75#define WIN2_DSP_ST2 0x00e8 75 struct vop_reg cbcr_hor_scl_mode;
76#define WIN2_DST_ALPHA_CTRL 0x00ec 76 struct vop_reg yrgb_vsd_mode;
77#define WIN2_MST3 0x00f0 77 struct vop_reg yrgb_vsu_mode;
78#define WIN2_DSP_INFO3 0x00f4 78 struct vop_reg yrgb_hsd_mode;
79#define WIN2_DSP_ST3 0x00f8 79 struct vop_reg yrgb_ver_scl_mode;
80#define WIN2_FADING_CTRL 0x00fc 80 struct vop_reg yrgb_hor_scl_mode;
81/* win3 register */ 81 struct vop_reg line_load_mode;
82#define WIN3_CTRL0 0x0100 82 struct vop_reg cbcr_axi_gather_num;
83#define WIN3_CTRL1 0x0104 83 struct vop_reg yrgb_axi_gather_num;
84#define WIN3_VIR0_1 0x0108 84 struct vop_reg vsd_cbcr_gt2;
85#define WIN3_VIR2_3 0x010c 85 struct vop_reg vsd_cbcr_gt4;
86#define WIN3_MST0 0x0110 86 struct vop_reg vsd_yrgb_gt2;
87#define WIN3_DSP_INFO0 0x0114 87 struct vop_reg vsd_yrgb_gt4;
88#define WIN3_DSP_ST0 0x0118 88 struct vop_reg bic_coe_sel;
89#define WIN3_COLOR_KEY 0x011c 89 struct vop_reg cbcr_axi_gather_en;
90#define WIN3_MST1 0x0120 90 struct vop_reg yrgb_axi_gather_en;
91#define WIN3_DSP_INFO1 0x0124 91 struct vop_reg lb_mode;
92#define WIN3_DSP_ST1 0x0128 92};
93#define WIN3_SRC_ALPHA_CTRL 0x012c 93
94#define WIN3_MST2 0x0130 94struct vop_scl_regs {
95#define WIN3_DSP_INFO2 0x0134 95 const struct vop_scl_extension *ext;
96#define WIN3_DSP_ST2 0x0138 96
97#define WIN3_DST_ALPHA_CTRL 0x013c 97 struct vop_reg scale_yrgb_x;
98#define WIN3_MST3 0x0140 98 struct vop_reg scale_yrgb_y;
99#define WIN3_DSP_INFO3 0x0144 99 struct vop_reg scale_cbcr_x;
100#define WIN3_DSP_ST3 0x0148 100 struct vop_reg scale_cbcr_y;
101#define WIN3_FADING_CTRL 0x014c 101};
102/* hwc register */ 102
103#define HWC_CTRL0 0x0150 103struct vop_win_phy {
104#define HWC_CTRL1 0x0154 104 const struct vop_scl_regs *scl;
105#define HWC_MST 0x0158 105 const uint32_t *data_formats;
106#define HWC_DSP_ST 0x015c 106 uint32_t nformats;
107#define HWC_SRC_ALPHA_CTRL 0x0160 107
108#define HWC_DST_ALPHA_CTRL 0x0164 108 struct vop_reg enable;
109#define HWC_FADING_CTRL 0x0168 109 struct vop_reg format;
110/* post process register */ 110 struct vop_reg rb_swap;
111#define POST_DSP_HACT_INFO 0x0170 111 struct vop_reg act_info;
112#define POST_DSP_VACT_INFO 0x0174 112 struct vop_reg dsp_info;
113#define POST_SCL_FACTOR_YRGB 0x0178 113 struct vop_reg dsp_st;
114#define POST_SCL_CTRL 0x0180 114 struct vop_reg yrgb_mst;
115#define POST_DSP_VACT_INFO_F1 0x0184 115 struct vop_reg uv_mst;
116#define DSP_HTOTAL_HS_END 0x0188 116 struct vop_reg yrgb_vir;
117#define DSP_HACT_ST_END 0x018c 117 struct vop_reg uv_vir;
118#define DSP_VTOTAL_VS_END 0x0190 118
119#define DSP_VACT_ST_END 0x0194 119 struct vop_reg dst_alpha_ctl;
120#define DSP_VS_ST_END_F1 0x0198 120 struct vop_reg src_alpha_ctl;
121#define DSP_VACT_ST_END_F1 0x019c 121};
122/* register definition end */ 122
123struct vop_win_data {
124 uint32_t base;
125 const struct vop_win_phy *phy;
126 enum drm_plane_type type;
127};
128
129struct vop_data {
130 const struct vop_reg_data *init_table;
131 unsigned int table_size;
132 const struct vop_ctrl *ctrl;
133 const struct vop_intr *intr;
134 const struct vop_win_data *win;
135 unsigned int win_size;
136};
123 137
124/* interrupt define */ 138/* interrupt define */
125#define DSP_HOLD_VALID_INTR (1 << 0) 139#define DSP_HOLD_VALID_INTR (1 << 0)
@@ -233,6 +247,11 @@ static inline uint16_t scl_cal_scale(int src, int dst, int shift)
233 return ((src * 2 - 3) << (shift - 1)) / (dst - 1); 247 return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
234} 248}
235 249
250static inline uint16_t scl_cal_scale2(int src, int dst)
251{
252 return ((src - 1) << 12) / (dst - 1);
253}
254
236#define GET_SCL_FT_BILI_DN(src, dst) scl_cal_scale(src, dst, 12) 255#define GET_SCL_FT_BILI_DN(src, dst) scl_cal_scale(src, dst, 12)
237#define GET_SCL_FT_BILI_UP(src, dst) scl_cal_scale(src, dst, 16) 256#define GET_SCL_FT_BILI_UP(src, dst) scl_cal_scale(src, dst, 16)
238#define GET_SCL_FT_BIC(src, dst) scl_cal_scale(src, dst, 16) 257#define GET_SCL_FT_BIC(src, dst) scl_cal_scale(src, dst, 16)
@@ -286,4 +305,5 @@ static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
286 return lb_mode; 305 return lb_mode;
287} 306}
288 307
308extern const struct component_ops vop_component_ops;
289#endif /* _ROCKCHIP_DRM_VOP_H */ 309#endif /* _ROCKCHIP_DRM_VOP_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
new file mode 100644
index 000000000000..3166b46a5893
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -0,0 +1,316 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <drm/drmP.h>
16
17#include <linux/kernel.h>
18#include <linux/component.h>
19
20#include "rockchip_drm_vop.h"
21#include "rockchip_vop_reg.h"
22
23#define VOP_REG(off, _mask, s) \
24 {.offset = off, \
25 .mask = _mask, \
26 .shift = s,}
27
28static const uint32_t formats_win_full[] = {
29 DRM_FORMAT_XRGB8888,
30 DRM_FORMAT_ARGB8888,
31 DRM_FORMAT_XBGR8888,
32 DRM_FORMAT_ABGR8888,
33 DRM_FORMAT_RGB888,
34 DRM_FORMAT_BGR888,
35 DRM_FORMAT_RGB565,
36 DRM_FORMAT_BGR565,
37 DRM_FORMAT_NV12,
38 DRM_FORMAT_NV16,
39 DRM_FORMAT_NV24,
40};
41
42static const uint32_t formats_win_lite[] = {
43 DRM_FORMAT_XRGB8888,
44 DRM_FORMAT_ARGB8888,
45 DRM_FORMAT_XBGR8888,
46 DRM_FORMAT_ABGR8888,
47 DRM_FORMAT_RGB888,
48 DRM_FORMAT_BGR888,
49 DRM_FORMAT_RGB565,
50 DRM_FORMAT_BGR565,
51};
52
53static const struct vop_scl_extension rk3288_win_full_scl_ext = {
54 .cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
55 .cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
56 .cbcr_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 28),
57 .cbcr_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 26),
58 .cbcr_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 24),
59 .yrgb_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 23),
60 .yrgb_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 22),
61 .yrgb_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 20),
62 .yrgb_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 18),
63 .yrgb_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 16),
64 .line_load_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 15),
65 .cbcr_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0x7, 12),
66 .yrgb_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0xf, 8),
67 .vsd_cbcr_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 7),
68 .vsd_cbcr_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 6),
69 .vsd_yrgb_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 5),
70 .vsd_yrgb_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 4),
71 .bic_coe_sel = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 2),
72 .cbcr_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 1),
73 .yrgb_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 0),
74 .lb_mode = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 5),
75};
76
77static const struct vop_scl_regs rk3288_win_full_scl = {
78 .ext = &rk3288_win_full_scl_ext,
79 .scale_yrgb_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
80 .scale_yrgb_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
81 .scale_cbcr_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
82 .scale_cbcr_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
83};
84
85static const struct vop_win_phy rk3288_win01_data = {
86 .scl = &rk3288_win_full_scl,
87 .data_formats = formats_win_full,
88 .nformats = ARRAY_SIZE(formats_win_full),
89 .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
90 .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
91 .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
92 .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
93 .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
94 .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
95 .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
96 .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
97 .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
98 .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
99 .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
100 .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
101};
102
103static const struct vop_win_phy rk3288_win23_data = {
104 .data_formats = formats_win_lite,
105 .nformats = ARRAY_SIZE(formats_win_lite),
106 .enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
107 .format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
108 .rb_swap = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 12),
109 .dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO0, 0x0fff0fff, 0),
110 .dsp_st = VOP_REG(RK3288_WIN2_DSP_ST0, 0x1fff1fff, 0),
111 .yrgb_mst = VOP_REG(RK3288_WIN2_MST0, 0xffffffff, 0),
112 .yrgb_vir = VOP_REG(RK3288_WIN2_VIR0_1, 0x1fff, 0),
113 .src_alpha_ctl = VOP_REG(RK3288_WIN2_SRC_ALPHA_CTRL, 0xff, 0),
114 .dst_alpha_ctl = VOP_REG(RK3288_WIN2_DST_ALPHA_CTRL, 0xff, 0),
115};
116
117static const struct vop_ctrl rk3288_ctrl_data = {
118 .standby = VOP_REG(RK3288_SYS_CTRL, 0x1, 22),
119 .gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
120 .mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
121 .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
122 .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
123 .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
124 .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
125 .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
126 .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
127 .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
128 .out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
129 .pin_pol = VOP_REG(RK3288_DSP_CTRL0, 0xf, 4),
130 .htotal_pw = VOP_REG(RK3288_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
131 .hact_st_end = VOP_REG(RK3288_DSP_HACT_ST_END, 0x1fff1fff, 0),
132 .vtotal_pw = VOP_REG(RK3288_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
133 .vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
134 .hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
135 .vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
136 .cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0),
137};
138
139static const struct vop_reg_data rk3288_init_reg_table[] = {
140 {RK3288_SYS_CTRL, 0x00c00000},
141 {RK3288_DSP_CTRL0, 0x00000000},
142 {RK3288_WIN0_CTRL0, 0x00000080},
143 {RK3288_WIN1_CTRL0, 0x00000080},
144 /* TODO: Win2/3 support multiple area function, but we haven't found
145 * a suitable way to use it yet, so let's just use them as other windows
146 * with only area 0 enabled.
147 */
148 {RK3288_WIN2_CTRL0, 0x00000010},
149 {RK3288_WIN3_CTRL0, 0x00000010},
150};
151
152/*
153 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
154 * special support to get alpha blending working. For now, just use overlay
155 * window 3 for the drm cursor.
156 *
157 */
158static const struct vop_win_data rk3288_vop_win_data[] = {
159 { .base = 0x00, .phy = &rk3288_win01_data,
160 .type = DRM_PLANE_TYPE_PRIMARY },
161 { .base = 0x40, .phy = &rk3288_win01_data,
162 .type = DRM_PLANE_TYPE_OVERLAY },
163 { .base = 0x00, .phy = &rk3288_win23_data,
164 .type = DRM_PLANE_TYPE_OVERLAY },
165 { .base = 0x50, .phy = &rk3288_win23_data,
166 .type = DRM_PLANE_TYPE_CURSOR },
167};
168
169static const int rk3288_vop_intrs[] = {
170 DSP_HOLD_VALID_INTR,
171 FS_INTR,
172 LINE_FLAG_INTR,
173 BUS_ERROR_INTR,
174};
175
176static const struct vop_intr rk3288_vop_intr = {
177 .intrs = rk3288_vop_intrs,
178 .nintrs = ARRAY_SIZE(rk3288_vop_intrs),
179 .status = VOP_REG(RK3288_INTR_CTRL0, 0xf, 0),
180 .enable = VOP_REG(RK3288_INTR_CTRL0, 0xf, 4),
181 .clear = VOP_REG(RK3288_INTR_CTRL0, 0xf, 8),
182};
183
184static const struct vop_data rk3288_vop = {
185 .init_table = rk3288_init_reg_table,
186 .table_size = ARRAY_SIZE(rk3288_init_reg_table),
187 .intr = &rk3288_vop_intr,
188 .ctrl = &rk3288_ctrl_data,
189 .win = rk3288_vop_win_data,
190 .win_size = ARRAY_SIZE(rk3288_vop_win_data),
191};
192
193static const struct vop_scl_regs rk3066_win_scl = {
194 .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
195 .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
196 .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
197 .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
198};
199
200static const struct vop_win_phy rk3036_win0_data = {
201 .scl = &rk3066_win_scl,
202 .data_formats = formats_win_full,
203 .nformats = ARRAY_SIZE(formats_win_full),
204 .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
205 .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
206 .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
207 .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
208 .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
209 .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
210 .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
211 .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
212 .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
213};
214
215static const struct vop_win_phy rk3036_win1_data = {
216 .data_formats = formats_win_lite,
217 .nformats = ARRAY_SIZE(formats_win_lite),
218 .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
219 .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
220 .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
221 .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
222 .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
223 .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
224 .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
225 .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
226};
227
228static const struct vop_win_data rk3036_vop_win_data[] = {
229 { .base = 0x00, .phy = &rk3036_win0_data,
230 .type = DRM_PLANE_TYPE_PRIMARY },
231 { .base = 0x00, .phy = &rk3036_win1_data,
232 .type = DRM_PLANE_TYPE_CURSOR },
233};
234
235static const int rk3036_vop_intrs[] = {
236 DSP_HOLD_VALID_INTR,
237 FS_INTR,
238 LINE_FLAG_INTR,
239 BUS_ERROR_INTR,
240};
241
242static const struct vop_intr rk3036_intr = {
243 .intrs = rk3036_vop_intrs,
244 .nintrs = ARRAY_SIZE(rk3036_vop_intrs),
245 .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
246 .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
247 .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
248};
249
250static const struct vop_ctrl rk3036_ctrl_data = {
251 .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
252 .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
253 .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
254 .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
255 .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
256 .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
257 .vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
258 .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
259};
260
261static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
262 {RK3036_DSP_CTRL1, 0x00000000},
263};
264
265static const struct vop_data rk3036_vop = {
266 .init_table = rk3036_vop_init_reg_table,
267 .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
268 .ctrl = &rk3036_ctrl_data,
269 .intr = &rk3036_intr,
270 .win = rk3036_vop_win_data,
271 .win_size = ARRAY_SIZE(rk3036_vop_win_data),
272};
273
274static const struct of_device_id vop_driver_dt_match[] = {
275 { .compatible = "rockchip,rk3288-vop",
276 .data = &rk3288_vop },
277 { .compatible = "rockchip,rk3036-vop",
278 .data = &rk3036_vop },
279 {},
280};
281MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
282
283static int vop_probe(struct platform_device *pdev)
284{
285 struct device *dev = &pdev->dev;
286
287 if (!dev->of_node) {
288 dev_err(dev, "can't find vop devices\n");
289 return -ENODEV;
290 }
291
292 return component_add(dev, &vop_component_ops);
293}
294
295static int vop_remove(struct platform_device *pdev)
296{
297 component_del(&pdev->dev, &vop_component_ops);
298
299 return 0;
300}
301
302struct platform_driver vop_platform_driver = {
303 .probe = vop_probe,
304 .remove = vop_remove,
305 .driver = {
306 .name = "rockchip-vop",
307 .owner = THIS_MODULE,
308 .of_match_table = of_match_ptr(vop_driver_dt_match),
309 },
310};
311
312module_platform_driver(vop_platform_driver);
313
314MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
315MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
316MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
new file mode 100644
index 000000000000..d4b46cba2f26
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -0,0 +1,169 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_VOP_REG_H
16#define _ROCKCHIP_VOP_REG_H
17
18/* rk3288 register definition */
19#define RK3288_REG_CFG_DONE 0x0000
20#define RK3288_VERSION_INFO 0x0004
21#define RK3288_SYS_CTRL 0x0008
22#define RK3288_SYS_CTRL1 0x000c
23#define RK3288_DSP_CTRL0 0x0010
24#define RK3288_DSP_CTRL1 0x0014
25#define RK3288_DSP_BG 0x0018
26#define RK3288_MCU_CTRL 0x001c
27#define RK3288_INTR_CTRL0 0x0020
28#define RK3288_INTR_CTRL1 0x0024
29#define RK3288_WIN0_CTRL0 0x0030
30#define RK3288_WIN0_CTRL1 0x0034
31#define RK3288_WIN0_COLOR_KEY 0x0038
32#define RK3288_WIN0_VIR 0x003c
33#define RK3288_WIN0_YRGB_MST 0x0040
34#define RK3288_WIN0_CBR_MST 0x0044
35#define RK3288_WIN0_ACT_INFO 0x0048
36#define RK3288_WIN0_DSP_INFO 0x004c
37#define RK3288_WIN0_DSP_ST 0x0050
38#define RK3288_WIN0_SCL_FACTOR_YRGB 0x0054
39#define RK3288_WIN0_SCL_FACTOR_CBR 0x0058
40#define RK3288_WIN0_SCL_OFFSET 0x005c
41#define RK3288_WIN0_SRC_ALPHA_CTRL 0x0060
42#define RK3288_WIN0_DST_ALPHA_CTRL 0x0064
43#define RK3288_WIN0_FADING_CTRL 0x0068
44
45/* win1 register */
46#define RK3288_WIN1_CTRL0 0x0070
47#define RK3288_WIN1_CTRL1 0x0074
48#define RK3288_WIN1_COLOR_KEY 0x0078
49#define RK3288_WIN1_VIR 0x007c
50#define RK3288_WIN1_YRGB_MST 0x0080
51#define RK3288_WIN1_CBR_MST 0x0084
52#define RK3288_WIN1_ACT_INFO 0x0088
53#define RK3288_WIN1_DSP_INFO 0x008c
54#define RK3288_WIN1_DSP_ST 0x0090
55#define RK3288_WIN1_SCL_FACTOR_YRGB 0x0094
56#define RK3288_WIN1_SCL_FACTOR_CBR 0x0098
57#define RK3288_WIN1_SCL_OFFSET 0x009c
58#define RK3288_WIN1_SRC_ALPHA_CTRL 0x00a0
59#define RK3288_WIN1_DST_ALPHA_CTRL 0x00a4
60#define RK3288_WIN1_FADING_CTRL 0x00a8
61/* win2 register */
62#define RK3288_WIN2_CTRL0 0x00b0
63#define RK3288_WIN2_CTRL1 0x00b4
64#define RK3288_WIN2_VIR0_1 0x00b8
65#define RK3288_WIN2_VIR2_3 0x00bc
66#define RK3288_WIN2_MST0 0x00c0
67#define RK3288_WIN2_DSP_INFO0 0x00c4
68#define RK3288_WIN2_DSP_ST0 0x00c8
69#define RK3288_WIN2_COLOR_KEY 0x00cc
70#define RK3288_WIN2_MST1 0x00d0
71#define RK3288_WIN2_DSP_INFO1 0x00d4
72#define RK3288_WIN2_DSP_ST1 0x00d8
73#define RK3288_WIN2_SRC_ALPHA_CTRL 0x00dc
74#define RK3288_WIN2_MST2 0x00e0
75#define RK3288_WIN2_DSP_INFO2 0x00e4
76#define RK3288_WIN2_DSP_ST2 0x00e8
77#define RK3288_WIN2_DST_ALPHA_CTRL 0x00ec
78#define RK3288_WIN2_MST3 0x00f0
79#define RK3288_WIN2_DSP_INFO3 0x00f4
80#define RK3288_WIN2_DSP_ST3 0x00f8
81#define RK3288_WIN2_FADING_CTRL 0x00fc
82/* win3 register */
83#define RK3288_WIN3_CTRL0 0x0100
84#define RK3288_WIN3_CTRL1 0x0104
85#define RK3288_WIN3_VIR0_1 0x0108
86#define RK3288_WIN3_VIR2_3 0x010c
87#define RK3288_WIN3_MST0 0x0110
88#define RK3288_WIN3_DSP_INFO0 0x0114
89#define RK3288_WIN3_DSP_ST0 0x0118
90#define RK3288_WIN3_COLOR_KEY 0x011c
91#define RK3288_WIN3_MST1 0x0120
92#define RK3288_WIN3_DSP_INFO1 0x0124
93#define RK3288_WIN3_DSP_ST1 0x0128
94#define RK3288_WIN3_SRC_ALPHA_CTRL 0x012c
95#define RK3288_WIN3_MST2 0x0130
96#define RK3288_WIN3_DSP_INFO2 0x0134
97#define RK3288_WIN3_DSP_ST2 0x0138
98#define RK3288_WIN3_DST_ALPHA_CTRL 0x013c
99#define RK3288_WIN3_MST3 0x0140
100#define RK3288_WIN3_DSP_INFO3 0x0144
101#define RK3288_WIN3_DSP_ST3 0x0148
102#define RK3288_WIN3_FADING_CTRL 0x014c
103/* hwc register */
104#define RK3288_HWC_CTRL0 0x0150
105#define RK3288_HWC_CTRL1 0x0154
106#define RK3288_HWC_MST 0x0158
107#define RK3288_HWC_DSP_ST 0x015c
108#define RK3288_HWC_SRC_ALPHA_CTRL 0x0160
109#define RK3288_HWC_DST_ALPHA_CTRL 0x0164
110#define RK3288_HWC_FADING_CTRL 0x0168
111/* post process register */
112#define RK3288_POST_DSP_HACT_INFO 0x0170
113#define RK3288_POST_DSP_VACT_INFO 0x0174
114#define RK3288_POST_SCL_FACTOR_YRGB 0x0178
115#define RK3288_POST_SCL_CTRL 0x0180
116#define RK3288_POST_DSP_VACT_INFO_F1 0x0184
117#define RK3288_DSP_HTOTAL_HS_END 0x0188
118#define RK3288_DSP_HACT_ST_END 0x018c
119#define RK3288_DSP_VTOTAL_VS_END 0x0190
120#define RK3288_DSP_VACT_ST_END 0x0194
121#define RK3288_DSP_VS_ST_END_F1 0x0198
122#define RK3288_DSP_VACT_ST_END_F1 0x019c
123/* register definition end */
124
125/* rk3036 register definition */
126#define RK3036_SYS_CTRL 0x00
127#define RK3036_DSP_CTRL0 0x04
128#define RK3036_DSP_CTRL1 0x08
129#define RK3036_INT_STATUS 0x10
130#define RK3036_ALPHA_CTRL 0x14
131#define RK3036_WIN0_COLOR_KEY 0x18
132#define RK3036_WIN1_COLOR_KEY 0x1c
133#define RK3036_WIN0_YRGB_MST 0x20
134#define RK3036_WIN0_CBR_MST 0x24
135#define RK3036_WIN1_VIR 0x28
136#define RK3036_AXI_BUS_CTRL 0x2c
137#define RK3036_WIN0_VIR 0x30
138#define RK3036_WIN0_ACT_INFO 0x34
139#define RK3036_WIN0_DSP_INFO 0x38
140#define RK3036_WIN0_DSP_ST 0x3c
141#define RK3036_WIN0_SCL_FACTOR_YRGB 0x40
142#define RK3036_WIN0_SCL_FACTOR_CBR 0x44
143#define RK3036_WIN0_SCL_OFFSET 0x48
144#define RK3036_HWC_MST 0x58
145#define RK3036_HWC_DSP_ST 0x5c
146#define RK3036_DSP_HTOTAL_HS_END 0x6c
147#define RK3036_DSP_HACT_ST_END 0x70
148#define RK3036_DSP_VTOTAL_VS_END 0x74
149#define RK3036_DSP_VACT_ST_END 0x78
150#define RK3036_DSP_VS_ST_END_F1 0x7c
151#define RK3036_DSP_VACT_ST_END_F1 0x80
152#define RK3036_GATHER_TRANSFER 0x84
153#define RK3036_VERSION_INFO 0x94
154#define RK3036_REG_CFG_DONE 0x90
155#define RK3036_WIN1_MST 0xa0
156#define RK3036_WIN1_ACT_INFO 0xb4
157#define RK3036_WIN1_DSP_INFO 0xb8
158#define RK3036_WIN1_DSP_ST 0xbc
159#define RK3036_WIN1_SCL_FACTOR_YRGB 0xc0
160#define RK3036_WIN1_SCL_OFFSET 0xc8
161#define RK3036_BCSH_CTRL 0xd0
162#define RK3036_BCSH_COLOR_BAR 0xd4
163#define RK3036_BCSH_BCS 0xd8
164#define RK3036_BCSH_H 0xdc
165#define RK3036_WIN1_LUT_ADDR 0x400
166#define RK3036_HWC_LUT_ADDR 0x800
167/* rk3036 register definition end */
168
169#endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index e9272b0a8592..db0763794edc 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -613,7 +613,7 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
613 encoder->possible_crtcs = 1; 613 encoder->possible_crtcs = 1;
614 614
615 ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs, 615 ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
616 DRM_MODE_ENCODER_LVDS); 616 DRM_MODE_ENCODER_LVDS, NULL);
617 if (ret < 0) 617 if (ret < 0)
618 return ret; 618 return ret;
619 619
@@ -739,8 +739,6 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
739 if (ret < 0) 739 if (ret < 0)
740 goto err_backlight; 740 goto err_backlight;
741 741
742 connector->encoder = encoder;
743
744 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 742 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
745 drm_object_property_set_value(&connector->base, 743 drm_object_property_set_value(&connector->base,
746 sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); 744 sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 3ae09dcd4fd8..de11c7cfb02f 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -367,7 +367,7 @@ int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
367 int res; 367 int res;
368 368
369 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 369 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
370 &sti_crtc_funcs); 370 &sti_crtc_funcs, NULL);
371 if (res) { 371 if (res) {
372 DRM_ERROR("Can't initialze CRTC\n"); 372 DRM_ERROR("Can't initialze CRTC\n");
373 return -EINVAL; 373 return -EINVAL;
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index dd1032195051..807863106b8d 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -272,7 +272,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
272 &sti_plane_helpers_funcs, 272 &sti_plane_helpers_funcs,
273 cursor_supported_formats, 273 cursor_supported_formats,
274 ARRAY_SIZE(cursor_supported_formats), 274 ARRAY_SIZE(cursor_supported_formats),
275 DRM_PLANE_TYPE_CURSOR); 275 DRM_PLANE_TYPE_CURSOR, NULL);
276 if (res) { 276 if (res) {
277 DRM_ERROR("Failed to initialize universal plane\n"); 277 DRM_ERROR("Failed to initialize universal plane\n");
278 goto err_plane; 278 goto err_plane;
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 1469987949d8..506b5626f3ed 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -160,6 +160,7 @@ static int sti_load(struct drm_device *dev, unsigned long flags)
160 160
161 drm_mode_config_reset(dev); 161 drm_mode_config_reset(dev);
162 162
163 drm_helper_disable_unused_functions(dev);
163 drm_fbdev_cma_init(dev, 32, 164 drm_fbdev_cma_init(dev, 32,
164 dev->mode_config.num_crtc, 165 dev->mode_config.num_crtc,
165 dev->mode_config.num_connector); 166 dev->mode_config.num_connector);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index c85dc7d6b005..f9a1d92c9d95 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -630,7 +630,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
630 &sti_plane_helpers_funcs, 630 &sti_plane_helpers_funcs,
631 gdp_supported_formats, 631 gdp_supported_formats,
632 ARRAY_SIZE(gdp_supported_formats), 632 ARRAY_SIZE(gdp_supported_formats),
633 type); 633 type, NULL);
634 if (res) { 634 if (res) {
635 DRM_ERROR("Failed to initialize universal plane\n"); 635 DRM_ERROR("Failed to initialize universal plane\n");
636 goto err; 636 goto err;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index d735daccd458..49cce833f2c8 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -543,8 +543,6 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
543 count++; 543 count++;
544 } 544 }
545 545
546 drm_mode_sort(&connector->modes);
547
548 return count; 546 return count;
549} 547}
550 548
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index ea0690bc77d5..43861b52261d 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -973,7 +973,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
973 &sti_plane_helpers_funcs, 973 &sti_plane_helpers_funcs,
974 hqvdp_supported_formats, 974 hqvdp_supported_formats,
975 ARRAY_SIZE(hqvdp_supported_formats), 975 ARRAY_SIZE(hqvdp_supported_formats),
976 DRM_PLANE_TYPE_OVERLAY); 976 DRM_PLANE_TYPE_OVERLAY, NULL);
977 if (res) { 977 if (res) {
978 DRM_ERROR("Failed to initialize universal plane\n"); 978 DRM_ERROR("Failed to initialize universal plane\n");
979 return NULL; 979 return NULL;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index c8a4c5dae2b6..f2afcf5438b8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -512,7 +512,8 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev,
512 drm_encoder->possible_clones = 1 << 0; 512 drm_encoder->possible_clones = 1 << 0;
513 513
514 drm_encoder_init(dev, drm_encoder, 514 drm_encoder_init(dev, drm_encoder,
515 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS); 515 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS,
516 NULL);
516 517
517 drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs); 518 drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs);
518 519
@@ -564,7 +565,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
564 drm_encoder->possible_clones = 1 << 0; 565 drm_encoder->possible_clones = 1 << 0;
565 566
566 drm_encoder_init(dev, drm_encoder, 567 drm_encoder_init(dev, drm_encoder,
567 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC); 568 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
568 569
569 drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs); 570 drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
570 571
@@ -613,7 +614,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
613 drm_encoder->possible_clones = 1 << 1; 614 drm_encoder->possible_clones = 1 << 1;
614 615
615 drm_encoder_init(dev, drm_encoder, 616 drm_encoder_init(dev, drm_encoder,
616 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS); 617 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL);
617 618
618 drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs); 619 drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
619 620
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index e9f24a85a103..dde6f208c347 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -660,7 +660,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
660 660
661 err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, 661 err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
662 &tegra_primary_plane_funcs, formats, 662 &tegra_primary_plane_funcs, formats,
663 num_formats, DRM_PLANE_TYPE_PRIMARY); 663 num_formats, DRM_PLANE_TYPE_PRIMARY,
664 NULL);
664 if (err < 0) { 665 if (err < 0) {
665 kfree(plane); 666 kfree(plane);
666 return ERR_PTR(err); 667 return ERR_PTR(err);
@@ -827,7 +828,8 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
827 828
828 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, 829 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
829 &tegra_cursor_plane_funcs, formats, 830 &tegra_cursor_plane_funcs, formats,
830 num_formats, DRM_PLANE_TYPE_CURSOR); 831 num_formats, DRM_PLANE_TYPE_CURSOR,
832 NULL);
831 if (err < 0) { 833 if (err < 0) {
832 kfree(plane); 834 kfree(plane);
833 return ERR_PTR(err); 835 return ERR_PTR(err);
@@ -890,7 +892,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
890 892
891 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, 893 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
892 &tegra_overlay_plane_funcs, formats, 894 &tegra_overlay_plane_funcs, formats,
893 num_formats, DRM_PLANE_TYPE_OVERLAY); 895 num_formats, DRM_PLANE_TYPE_OVERLAY,
896 NULL);
894 if (err < 0) { 897 if (err < 0) {
895 kfree(plane); 898 kfree(plane);
896 return ERR_PTR(err); 899 return ERR_PTR(err);
@@ -1732,7 +1735,7 @@ static int tegra_dc_init(struct host1x_client *client)
1732 } 1735 }
1733 1736
1734 err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor, 1737 err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
1735 &tegra_crtc_funcs); 1738 &tegra_crtc_funcs, NULL);
1736 if (err < 0) 1739 if (err < 0)
1737 goto cleanup; 1740 goto cleanup;
1738 1741
@@ -1952,8 +1955,10 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
1952 * cases where only a single display controller is used. 1955 * cases where only a single display controller is used.
1953 */ 1956 */
1954 for_each_matching_node(np, tegra_dc_of_match) { 1957 for_each_matching_node(np, tegra_dc_of_match) {
1955 if (np == dc->dev->of_node) 1958 if (np == dc->dev->of_node) {
1959 of_node_put(np);
1956 break; 1960 break;
1961 }
1957 1962
1958 value++; 1963 value++;
1959 } 1964 }
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 6aecb6647313..b24a0f14821a 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -436,7 +436,7 @@ struct platform_driver tegra_dpaux_driver = {
436 .remove = tegra_dpaux_remove, 436 .remove = tegra_dpaux_remove,
437}; 437};
438 438
439struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np) 439struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np)
440{ 440{
441 struct tegra_dpaux *dpaux; 441 struct tegra_dpaux *dpaux;
442 442
@@ -445,7 +445,7 @@ struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
445 list_for_each_entry(dpaux, &dpaux_list, list) 445 list_for_each_entry(dpaux, &dpaux_list, list)
446 if (np == dpaux->dev->of_node) { 446 if (np == dpaux->dev->of_node) {
447 mutex_unlock(&dpaux_lock); 447 mutex_unlock(&dpaux_lock);
448 return dpaux; 448 return &dpaux->aux;
449 } 449 }
450 450
451 mutex_unlock(&dpaux_lock); 451 mutex_unlock(&dpaux_lock);
@@ -453,8 +453,9 @@ struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
453 return NULL; 453 return NULL;
454} 454}
455 455
456int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output) 456int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
457{ 457{
458 struct tegra_dpaux *dpaux = to_dpaux(aux);
458 unsigned long timeout; 459 unsigned long timeout;
459 int err; 460 int err;
460 461
@@ -470,7 +471,7 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
470 while (time_before(jiffies, timeout)) { 471 while (time_before(jiffies, timeout)) {
471 enum drm_connector_status status; 472 enum drm_connector_status status;
472 473
473 status = tegra_dpaux_detect(dpaux); 474 status = drm_dp_aux_detect(aux);
474 if (status == connector_status_connected) { 475 if (status == connector_status_connected) {
475 enable_irq(dpaux->irq); 476 enable_irq(dpaux->irq);
476 return 0; 477 return 0;
@@ -482,8 +483,9 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
482 return -ETIMEDOUT; 483 return -ETIMEDOUT;
483} 484}
484 485
485int tegra_dpaux_detach(struct tegra_dpaux *dpaux) 486int drm_dp_aux_detach(struct drm_dp_aux *aux)
486{ 487{
488 struct tegra_dpaux *dpaux = to_dpaux(aux);
487 unsigned long timeout; 489 unsigned long timeout;
488 int err; 490 int err;
489 491
@@ -498,7 +500,7 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
498 while (time_before(jiffies, timeout)) { 500 while (time_before(jiffies, timeout)) {
499 enum drm_connector_status status; 501 enum drm_connector_status status;
500 502
501 status = tegra_dpaux_detect(dpaux); 503 status = drm_dp_aux_detect(aux);
502 if (status == connector_status_disconnected) { 504 if (status == connector_status_disconnected) {
503 dpaux->output = NULL; 505 dpaux->output = NULL;
504 return 0; 506 return 0;
@@ -510,8 +512,9 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
510 return -ETIMEDOUT; 512 return -ETIMEDOUT;
511} 513}
512 514
513enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux) 515enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
514{ 516{
517 struct tegra_dpaux *dpaux = to_dpaux(aux);
515 u32 value; 518 u32 value;
516 519
517 value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT); 520 value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
@@ -522,8 +525,9 @@ enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
522 return connector_status_disconnected; 525 return connector_status_disconnected;
523} 526}
524 527
525int tegra_dpaux_enable(struct tegra_dpaux *dpaux) 528int drm_dp_aux_enable(struct drm_dp_aux *aux)
526{ 529{
530 struct tegra_dpaux *dpaux = to_dpaux(aux);
527 u32 value; 531 u32 value;
528 532
529 value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | 533 value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
@@ -540,8 +544,9 @@ int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
540 return 0; 544 return 0;
541} 545}
542 546
543int tegra_dpaux_disable(struct tegra_dpaux *dpaux) 547int drm_dp_aux_disable(struct drm_dp_aux *aux)
544{ 548{
549 struct tegra_dpaux *dpaux = to_dpaux(aux);
545 u32 value; 550 u32 value;
546 551
547 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); 552 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
@@ -551,11 +556,11 @@ int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
551 return 0; 556 return 0;
552} 557}
553 558
554int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding) 559int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
555{ 560{
556 int err; 561 int err;
557 562
558 err = drm_dp_dpcd_writeb(&dpaux->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, 563 err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
559 encoding); 564 encoding);
560 if (err < 0) 565 if (err < 0)
561 return err; 566 return err;
@@ -563,15 +568,15 @@ int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding)
563 return 0; 568 return 0;
564} 569}
565 570
566int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link, 571int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
567 u8 pattern) 572 u8 pattern)
568{ 573{
569 u8 tp = pattern & DP_TRAINING_PATTERN_MASK; 574 u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
570 u8 status[DP_LINK_STATUS_SIZE], values[4]; 575 u8 status[DP_LINK_STATUS_SIZE], values[4];
571 unsigned int i; 576 unsigned int i;
572 int err; 577 int err;
573 578
574 err = drm_dp_dpcd_writeb(&dpaux->aux, DP_TRAINING_PATTERN_SET, pattern); 579 err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
575 if (err < 0) 580 if (err < 0)
576 return err; 581 return err;
577 582
@@ -584,14 +589,14 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
584 DP_TRAIN_MAX_SWING_REACHED | 589 DP_TRAIN_MAX_SWING_REACHED |
585 DP_TRAIN_VOLTAGE_SWING_LEVEL_0; 590 DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
586 591
587 err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values, 592 err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
588 link->num_lanes); 593 link->num_lanes);
589 if (err < 0) 594 if (err < 0)
590 return err; 595 return err;
591 596
592 usleep_range(500, 1000); 597 usleep_range(500, 1000);
593 598
594 err = drm_dp_dpcd_read_link_status(&dpaux->aux, status); 599 err = drm_dp_dpcd_read_link_status(aux, status);
595 if (err < 0) 600 if (err < 0)
596 return err; 601 return err;
597 602
@@ -609,11 +614,11 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
609 break; 614 break;
610 615
611 default: 616 default:
612 dev_err(dpaux->dev, "unsupported training pattern %u\n", tp); 617 dev_err(aux->dev, "unsupported training pattern %u\n", tp);
613 return -EINVAL; 618 return -EINVAL;
614 } 619 }
615 620
616 err = drm_dp_dpcd_writeb(&dpaux->aux, DP_EDP_CONFIGURATION_SET, 0); 621 err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
617 if (err < 0) 622 if (err < 0)
618 return err; 623 return err;
619 624
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e0f827790a5e..c5c856a0879d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -137,8 +137,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
137 start = geometry->aperture_start; 137 start = geometry->aperture_start;
138 end = geometry->aperture_end; 138 end = geometry->aperture_end;
139 139
140 DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n", 140 DRM_DEBUG_DRIVER("IOMMU aperture initialized (%#llx-%#llx)\n",
141 start, end); 141 start, end);
142 drm_mm_init(&tegra->mm, start, end - start + 1); 142 drm_mm_init(&tegra->mm, start, end - start + 1);
143 } 143 }
144 144
@@ -277,9 +277,7 @@ host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
277 if (!gem) 277 if (!gem)
278 return NULL; 278 return NULL;
279 279
280 mutex_lock(&drm->struct_mutex); 280 drm_gem_object_unreference_unlocked(gem);
281 drm_gem_object_unreference(gem);
282 mutex_unlock(&drm->struct_mutex);
283 281
284 bo = to_tegra_bo(gem); 282 bo = to_tegra_bo(gem);
285 return &bo->base; 283 return &bo->base;
@@ -473,7 +471,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
473 471
474 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node); 472 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
475 473
476 drm_gem_object_unreference(gem); 474 drm_gem_object_unreference_unlocked(gem);
477 475
478 return 0; 476 return 0;
479} 477}
@@ -683,7 +681,7 @@ static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
683 bo->tiling.mode = mode; 681 bo->tiling.mode = mode;
684 bo->tiling.value = value; 682 bo->tiling.value = value;
685 683
686 drm_gem_object_unreference(gem); 684 drm_gem_object_unreference_unlocked(gem);
687 685
688 return 0; 686 return 0;
689} 687}
@@ -723,7 +721,7 @@ static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
723 break; 721 break;
724 } 722 }
725 723
726 drm_gem_object_unreference(gem); 724 drm_gem_object_unreference_unlocked(gem);
727 725
728 return err; 726 return err;
729} 727}
@@ -748,7 +746,7 @@ static int tegra_gem_set_flags(struct drm_device *drm, void *data,
748 if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP) 746 if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
749 bo->flags |= TEGRA_BO_BOTTOM_UP; 747 bo->flags |= TEGRA_BO_BOTTOM_UP;
750 748
751 drm_gem_object_unreference(gem); 749 drm_gem_object_unreference_unlocked(gem);
752 750
753 return 0; 751 return 0;
754} 752}
@@ -770,7 +768,7 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
770 if (bo->flags & TEGRA_BO_BOTTOM_UP) 768 if (bo->flags & TEGRA_BO_BOTTOM_UP)
771 args->flags |= DRM_TEGRA_GEM_BOTTOM_UP; 769 args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
772 770
773 drm_gem_object_unreference(gem); 771 drm_gem_object_unreference_unlocked(gem);
774 772
775 return 0; 773 return 0;
776} 774}
@@ -921,7 +919,8 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
921#endif 919#endif
922 920
923static struct drm_driver tegra_drm_driver = { 921static struct drm_driver tegra_drm_driver = {
924 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 922 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
923 DRIVER_ATOMIC,
925 .load = tegra_drm_load, 924 .load = tegra_drm_load,
926 .unload = tegra_drm_unload, 925 .unload = tegra_drm_unload,
927 .open = tegra_drm_open, 926 .open = tegra_drm_open,
@@ -991,7 +990,6 @@ static int host1x_drm_probe(struct host1x_device *dev)
991 if (!drm) 990 if (!drm)
992 return -ENOMEM; 991 return -ENOMEM;
993 992
994 drm_dev_set_unique(drm, dev_name(&dev->dev));
995 dev_set_drvdata(&dev->dev, drm); 993 dev_set_drvdata(&dev->dev, drm);
996 994
997 err = drm_dev_register(drm, 0); 995 err = drm_dev_register(drm, 0);
@@ -1023,8 +1021,17 @@ static int host1x_drm_remove(struct host1x_device *dev)
1023static int host1x_drm_suspend(struct device *dev) 1021static int host1x_drm_suspend(struct device *dev)
1024{ 1022{
1025 struct drm_device *drm = dev_get_drvdata(dev); 1023 struct drm_device *drm = dev_get_drvdata(dev);
1024 struct tegra_drm *tegra = drm->dev_private;
1026 1025
1027 drm_kms_helper_poll_disable(drm); 1026 drm_kms_helper_poll_disable(drm);
1027 tegra_drm_fb_suspend(drm);
1028
1029 tegra->state = drm_atomic_helper_suspend(drm);
1030 if (IS_ERR(tegra->state)) {
1031 tegra_drm_fb_resume(drm);
1032 drm_kms_helper_poll_enable(drm);
1033 return PTR_ERR(tegra->state);
1034 }
1028 1035
1029 return 0; 1036 return 0;
1030} 1037}
@@ -1032,7 +1039,10 @@ static int host1x_drm_suspend(struct device *dev)
1032static int host1x_drm_resume(struct device *dev) 1039static int host1x_drm_resume(struct device *dev)
1033{ 1040{
1034 struct drm_device *drm = dev_get_drvdata(dev); 1041 struct drm_device *drm = dev_get_drvdata(dev);
1042 struct tegra_drm *tegra = drm->dev_private;
1035 1043
1044 drm_atomic_helper_resume(drm, tegra->state);
1045 tegra_drm_fb_resume(drm);
1036 drm_kms_helper_poll_enable(drm); 1046 drm_kms_helper_poll_enable(drm);
1037 1047
1038 return 0; 1048 return 0;
@@ -1076,6 +1086,16 @@ static struct host1x_driver host1x_drm_driver = {
1076 .subdevs = host1x_drm_subdevs, 1086 .subdevs = host1x_drm_subdevs,
1077}; 1087};
1078 1088
1089static struct platform_driver * const drivers[] = {
1090 &tegra_dc_driver,
1091 &tegra_hdmi_driver,
1092 &tegra_dsi_driver,
1093 &tegra_dpaux_driver,
1094 &tegra_sor_driver,
1095 &tegra_gr2d_driver,
1096 &tegra_gr3d_driver,
1097};
1098
1079static int __init host1x_drm_init(void) 1099static int __init host1x_drm_init(void)
1080{ 1100{
1081 int err; 1101 int err;
@@ -1084,48 +1104,12 @@ static int __init host1x_drm_init(void)
1084 if (err < 0) 1104 if (err < 0)
1085 return err; 1105 return err;
1086 1106
1087 err = platform_driver_register(&tegra_dc_driver); 1107 err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
1088 if (err < 0) 1108 if (err < 0)
1089 goto unregister_host1x; 1109 goto unregister_host1x;
1090 1110
1091 err = platform_driver_register(&tegra_dsi_driver);
1092 if (err < 0)
1093 goto unregister_dc;
1094
1095 err = platform_driver_register(&tegra_sor_driver);
1096 if (err < 0)
1097 goto unregister_dsi;
1098
1099 err = platform_driver_register(&tegra_hdmi_driver);
1100 if (err < 0)
1101 goto unregister_sor;
1102
1103 err = platform_driver_register(&tegra_dpaux_driver);
1104 if (err < 0)
1105 goto unregister_hdmi;
1106
1107 err = platform_driver_register(&tegra_gr2d_driver);
1108 if (err < 0)
1109 goto unregister_dpaux;
1110
1111 err = platform_driver_register(&tegra_gr3d_driver);
1112 if (err < 0)
1113 goto unregister_gr2d;
1114
1115 return 0; 1111 return 0;
1116 1112
1117unregister_gr2d:
1118 platform_driver_unregister(&tegra_gr2d_driver);
1119unregister_dpaux:
1120 platform_driver_unregister(&tegra_dpaux_driver);
1121unregister_hdmi:
1122 platform_driver_unregister(&tegra_hdmi_driver);
1123unregister_sor:
1124 platform_driver_unregister(&tegra_sor_driver);
1125unregister_dsi:
1126 platform_driver_unregister(&tegra_dsi_driver);
1127unregister_dc:
1128 platform_driver_unregister(&tegra_dc_driver);
1129unregister_host1x: 1113unregister_host1x:
1130 host1x_driver_unregister(&host1x_drm_driver); 1114 host1x_driver_unregister(&host1x_drm_driver);
1131 return err; 1115 return err;
@@ -1134,13 +1118,7 @@ module_init(host1x_drm_init);
1134 1118
1135static void __exit host1x_drm_exit(void) 1119static void __exit host1x_drm_exit(void)
1136{ 1120{
1137 platform_driver_unregister(&tegra_gr3d_driver); 1121 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
1138 platform_driver_unregister(&tegra_gr2d_driver);
1139 platform_driver_unregister(&tegra_dpaux_driver);
1140 platform_driver_unregister(&tegra_hdmi_driver);
1141 platform_driver_unregister(&tegra_sor_driver);
1142 platform_driver_unregister(&tegra_dsi_driver);
1143 platform_driver_unregister(&tegra_dc_driver);
1144 host1x_driver_unregister(&host1x_drm_driver); 1122 host1x_driver_unregister(&host1x_drm_driver);
1145} 1123}
1146module_exit(host1x_drm_exit); 1124module_exit(host1x_drm_exit);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index d88a2d18c1a4..c088f2f67eda 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -57,6 +57,8 @@ struct tegra_drm {
57 struct work_struct work; 57 struct work_struct work;
58 struct mutex lock; 58 struct mutex lock;
59 } commit; 59 } commit;
60
61 struct drm_atomic_state *state;
60}; 62};
61 63
62struct tegra_drm_client; 64struct tegra_drm_client;
@@ -247,18 +249,17 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
247void tegra_output_encoder_destroy(struct drm_encoder *encoder); 249void tegra_output_encoder_destroy(struct drm_encoder *encoder);
248 250
249/* from dpaux.c */ 251/* from dpaux.c */
250struct tegra_dpaux;
251struct drm_dp_link; 252struct drm_dp_link;
252 253
253struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np); 254struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
254enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux); 255enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
255int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output); 256int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
256int tegra_dpaux_detach(struct tegra_dpaux *dpaux); 257int drm_dp_aux_detach(struct drm_dp_aux *aux);
257int tegra_dpaux_enable(struct tegra_dpaux *dpaux); 258int drm_dp_aux_enable(struct drm_dp_aux *aux);
258int tegra_dpaux_disable(struct tegra_dpaux *dpaux); 259int drm_dp_aux_disable(struct drm_dp_aux *aux);
259int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding); 260int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
260int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link, 261int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
261 u8 pattern); 262 u8 pattern);
262 263
263/* from fb.c */ 264/* from fb.c */
264struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, 265struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
@@ -273,16 +274,18 @@ int tegra_drm_fb_prepare(struct drm_device *drm);
273void tegra_drm_fb_free(struct drm_device *drm); 274void tegra_drm_fb_free(struct drm_device *drm);
274int tegra_drm_fb_init(struct drm_device *drm); 275int tegra_drm_fb_init(struct drm_device *drm);
275void tegra_drm_fb_exit(struct drm_device *drm); 276void tegra_drm_fb_exit(struct drm_device *drm);
277void tegra_drm_fb_suspend(struct drm_device *drm);
278void tegra_drm_fb_resume(struct drm_device *drm);
276#ifdef CONFIG_DRM_FBDEV_EMULATION 279#ifdef CONFIG_DRM_FBDEV_EMULATION
277void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); 280void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
278void tegra_fb_output_poll_changed(struct drm_device *drm); 281void tegra_fb_output_poll_changed(struct drm_device *drm);
279#endif 282#endif
280 283
281extern struct platform_driver tegra_dc_driver; 284extern struct platform_driver tegra_dc_driver;
282extern struct platform_driver tegra_dsi_driver;
283extern struct platform_driver tegra_sor_driver;
284extern struct platform_driver tegra_hdmi_driver; 285extern struct platform_driver tegra_hdmi_driver;
286extern struct platform_driver tegra_dsi_driver;
285extern struct platform_driver tegra_dpaux_driver; 287extern struct platform_driver tegra_dpaux_driver;
288extern struct platform_driver tegra_sor_driver;
286extern struct platform_driver tegra_gr2d_driver; 289extern struct platform_driver tegra_gr2d_driver;
287extern struct platform_driver tegra_gr3d_driver; 290extern struct platform_driver tegra_gr3d_driver;
288 291
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f0a138ef68ce..44e102799195 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -745,14 +745,13 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
745 745
746static void tegra_dsi_connector_reset(struct drm_connector *connector) 746static void tegra_dsi_connector_reset(struct drm_connector *connector)
747{ 747{
748 struct tegra_dsi_state *state; 748 struct tegra_dsi_state *state =
749 749 kzalloc(sizeof(*state), GFP_KERNEL);
750 kfree(connector->state);
751 connector->state = NULL;
752 750
753 state = kzalloc(sizeof(*state), GFP_KERNEL); 751 if (state) {
754 if (state) 752 kfree(connector->state);
755 connector->state = &state->base; 753 __drm_atomic_helper_connector_reset(connector, &state->base);
754 }
756} 755}
757 756
758static struct drm_connector_state * 757static struct drm_connector_state *
@@ -1023,7 +1022,7 @@ static int tegra_dsi_init(struct host1x_client *client)
1023 1022
1024 drm_encoder_init(drm, &dsi->output.encoder, 1023 drm_encoder_init(drm, &dsi->output.encoder,
1025 &tegra_dsi_encoder_funcs, 1024 &tegra_dsi_encoder_funcs,
1026 DRM_MODE_ENCODER_DSI); 1025 DRM_MODE_ENCODER_DSI, NULL);
1027 drm_encoder_helper_add(&dsi->output.encoder, 1026 drm_encoder_helper_add(&dsi->output.encoder,
1028 &tegra_dsi_encoder_helper_funcs); 1027 &tegra_dsi_encoder_helper_funcs);
1029 1028
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index ede9e94f3312..ca84de9ccb51 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,6 +10,8 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/console.h>
14
13#include "drm.h" 15#include "drm.h"
14#include "gem.h" 16#include "gem.h"
15 17
@@ -86,7 +88,7 @@ static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
86 return drm_gem_handle_create(file, &fb->planes[0]->gem, handle); 88 return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
87} 89}
88 90
89static struct drm_framebuffer_funcs tegra_fb_funcs = { 91static const struct drm_framebuffer_funcs tegra_fb_funcs = {
90 .destroy = tegra_fb_destroy, 92 .destroy = tegra_fb_destroy,
91 .create_handle = tegra_fb_create_handle, 93 .create_handle = tegra_fb_create_handle,
92}; 94};
@@ -413,3 +415,25 @@ void tegra_drm_fb_exit(struct drm_device *drm)
413 tegra_fbdev_exit(tegra->fbdev); 415 tegra_fbdev_exit(tegra->fbdev);
414#endif 416#endif
415} 417}
418
419void tegra_drm_fb_suspend(struct drm_device *drm)
420{
421#ifdef CONFIG_DRM_FBDEV_EMULATION
422 struct tegra_drm *tegra = drm->dev_private;
423
424 console_lock();
425 drm_fb_helper_set_suspend(&tegra->fbdev->base, 1);
426 console_unlock();
427#endif
428}
429
430void tegra_drm_fb_resume(struct drm_device *drm)
431{
432#ifdef CONFIG_DRM_FBDEV_EMULATION
433 struct tegra_drm *tegra = drm->dev_private;
434
435 console_lock();
436 drm_fb_helper_set_suspend(&tegra->fbdev->base, 0);
437 console_unlock();
438#endif
439}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 01e16e146bfe..33add93b4ed9 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -28,11 +28,8 @@ static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
28static void tegra_bo_put(struct host1x_bo *bo) 28static void tegra_bo_put(struct host1x_bo *bo)
29{ 29{
30 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 30 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
31 struct drm_device *drm = obj->gem.dev;
32 31
33 mutex_lock(&drm->struct_mutex); 32 drm_gem_object_unreference_unlocked(&obj->gem);
34 drm_gem_object_unreference(&obj->gem);
35 mutex_unlock(&drm->struct_mutex);
36} 33}
37 34
38static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) 35static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
@@ -72,11 +69,8 @@ static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
72static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) 69static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
73{ 70{
74 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 71 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
75 struct drm_device *drm = obj->gem.dev;
76 72
77 mutex_lock(&drm->struct_mutex);
78 drm_gem_object_reference(&obj->gem); 73 drm_gem_object_reference(&obj->gem);
79 mutex_unlock(&drm->struct_mutex);
80 74
81 return bo; 75 return bo;
82} 76}
@@ -408,12 +402,9 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
408 struct drm_gem_object *gem; 402 struct drm_gem_object *gem;
409 struct tegra_bo *bo; 403 struct tegra_bo *bo;
410 404
411 mutex_lock(&drm->struct_mutex);
412
413 gem = drm_gem_object_lookup(drm, file, handle); 405 gem = drm_gem_object_lookup(drm, file, handle);
414 if (!gem) { 406 if (!gem) {
415 dev_err(drm->dev, "failed to lookup GEM object\n"); 407 dev_err(drm->dev, "failed to lookup GEM object\n");
416 mutex_unlock(&drm->struct_mutex);
417 return -EINVAL; 408 return -EINVAL;
418 } 409 }
419 410
@@ -421,9 +412,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
421 412
422 *offset = drm_vma_node_offset_addr(&bo->gem.vma_node); 413 *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
423 414
424 drm_gem_object_unreference(gem); 415 drm_gem_object_unreference_unlocked(gem);
425
426 mutex_unlock(&drm->struct_mutex);
427 416
428 return 0; 417 return 0;
429} 418}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 52b32cbd9de6..b7ef4929e347 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1320,7 +1320,7 @@ static int tegra_hdmi_init(struct host1x_client *client)
1320 hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; 1320 hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
1321 1321
1322 drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, 1322 drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
1323 DRM_MODE_ENCODER_TMDS); 1323 DRM_MODE_ENCODER_TMDS, NULL);
1324 drm_encoder_helper_add(&hdmi->output.encoder, 1324 drm_encoder_helper_add(&hdmi->output.encoder,
1325 &tegra_hdmi_encoder_helper_funcs); 1325 &tegra_hdmi_encoder_helper_funcs);
1326 1326
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index bc9735b4ad60..e246334e0252 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -287,7 +287,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
287 output->connector.dpms = DRM_MODE_DPMS_OFF; 287 output->connector.dpms = DRM_MODE_DPMS_OFF;
288 288
289 drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, 289 drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
290 DRM_MODE_ENCODER_LVDS); 290 DRM_MODE_ENCODER_LVDS, NULL);
291 drm_encoder_helper_add(&output->encoder, 291 drm_encoder_helper_add(&output->encoder,
292 &tegra_rgb_encoder_helper_funcs); 292 &tegra_rgb_encoder_helper_funcs);
293 293
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 3eff7cf75d25..757c6e8603af 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -173,7 +173,7 @@ struct tegra_sor {
173 struct clk *clk_dp; 173 struct clk *clk_dp;
174 struct clk *clk; 174 struct clk *clk;
175 175
176 struct tegra_dpaux *dpaux; 176 struct drm_dp_aux *aux;
177 177
178 struct drm_info_list *debugfs_files; 178 struct drm_info_list *debugfs_files;
179 struct drm_minor *minor; 179 struct drm_minor *minor;
@@ -273,7 +273,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
273 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0); 273 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
274 tegra_sor_writel(sor, value, SOR_DP_PADCTL0); 274 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
275 275
276 err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B); 276 err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
277 if (err < 0) 277 if (err < 0)
278 return err; 278 return err;
279 279
@@ -288,7 +288,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
288 288
289 pattern = DP_TRAINING_PATTERN_1; 289 pattern = DP_TRAINING_PATTERN_1;
290 290
291 err = tegra_dpaux_train(sor->dpaux, link, pattern); 291 err = drm_dp_aux_train(sor->aux, link, pattern);
292 if (err < 0) 292 if (err < 0)
293 return err; 293 return err;
294 294
@@ -309,7 +309,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
309 309
310 pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2; 310 pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
311 311
312 err = tegra_dpaux_train(sor->dpaux, link, pattern); 312 err = drm_dp_aux_train(sor->aux, link, pattern);
313 if (err < 0) 313 if (err < 0)
314 return err; 314 return err;
315 315
@@ -324,7 +324,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
324 324
325 pattern = DP_TRAINING_PATTERN_DISABLE; 325 pattern = DP_TRAINING_PATTERN_DISABLE;
326 326
327 err = tegra_dpaux_train(sor->dpaux, link, pattern); 327 err = drm_dp_aux_train(sor->aux, link, pattern);
328 if (err < 0) 328 if (err < 0)
329 return err; 329 return err;
330 330
@@ -1044,8 +1044,8 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
1044 struct tegra_output *output = connector_to_output(connector); 1044 struct tegra_output *output = connector_to_output(connector);
1045 struct tegra_sor *sor = to_sor(output); 1045 struct tegra_sor *sor = to_sor(output);
1046 1046
1047 if (sor->dpaux) 1047 if (sor->aux)
1048 return tegra_dpaux_detect(sor->dpaux); 1048 return drm_dp_aux_detect(sor->aux);
1049 1049
1050 return tegra_output_connector_detect(connector, force); 1050 return tegra_output_connector_detect(connector, force);
1051} 1051}
@@ -1066,13 +1066,13 @@ static int tegra_sor_connector_get_modes(struct drm_connector *connector)
1066 struct tegra_sor *sor = to_sor(output); 1066 struct tegra_sor *sor = to_sor(output);
1067 int err; 1067 int err;
1068 1068
1069 if (sor->dpaux) 1069 if (sor->aux)
1070 tegra_dpaux_enable(sor->dpaux); 1070 drm_dp_aux_enable(sor->aux);
1071 1071
1072 err = tegra_output_connector_get_modes(connector); 1072 err = tegra_output_connector_get_modes(connector);
1073 1073
1074 if (sor->dpaux) 1074 if (sor->aux)
1075 tegra_dpaux_disable(sor->dpaux); 1075 drm_dp_aux_disable(sor->aux);
1076 1076
1077 return err; 1077 return err;
1078} 1078}
@@ -1128,8 +1128,8 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
1128 if (err < 0) 1128 if (err < 0)
1129 dev_err(sor->dev, "failed to power down SOR: %d\n", err); 1129 dev_err(sor->dev, "failed to power down SOR: %d\n", err);
1130 1130
1131 if (sor->dpaux) { 1131 if (sor->aux) {
1132 err = tegra_dpaux_disable(sor->dpaux); 1132 err = drm_dp_aux_disable(sor->aux);
1133 if (err < 0) 1133 if (err < 0)
1134 dev_err(sor->dev, "failed to disable DP: %d\n", err); 1134 dev_err(sor->dev, "failed to disable DP: %d\n", err);
1135 } 1135 }
@@ -1196,7 +1196,7 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1196 struct tegra_sor *sor = to_sor(output); 1196 struct tegra_sor *sor = to_sor(output);
1197 struct tegra_sor_config config; 1197 struct tegra_sor_config config;
1198 struct drm_dp_link link; 1198 struct drm_dp_link link;
1199 struct drm_dp_aux *aux; 1199 u8 rate, lanes;
1200 int err = 0; 1200 int err = 0;
1201 u32 value; 1201 u32 value;
1202 1202
@@ -1209,20 +1209,14 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1209 if (output->panel) 1209 if (output->panel)
1210 drm_panel_prepare(output->panel); 1210 drm_panel_prepare(output->panel);
1211 1211
1212 /* FIXME: properly convert to struct drm_dp_aux */ 1212 err = drm_dp_aux_enable(sor->aux);
1213 aux = (struct drm_dp_aux *)sor->dpaux; 1213 if (err < 0)
1214 1214 dev_err(sor->dev, "failed to enable DP: %d\n", err);
1215 if (sor->dpaux) {
1216 err = tegra_dpaux_enable(sor->dpaux);
1217 if (err < 0)
1218 dev_err(sor->dev, "failed to enable DP: %d\n", err);
1219 1215
1220 err = drm_dp_link_probe(aux, &link); 1216 err = drm_dp_link_probe(sor->aux, &link);
1221 if (err < 0) { 1217 if (err < 0) {
1222 dev_err(sor->dev, "failed to probe eDP link: %d\n", 1218 dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
1223 err); 1219 return;
1224 return;
1225 }
1226 } 1220 }
1227 1221
1228 err = clk_set_parent(sor->clk, sor->clk_safe); 1222 err = clk_set_parent(sor->clk, sor->clk_safe);
@@ -1434,60 +1428,51 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1434 value |= SOR_DP_PADCTL_PAD_CAL_PD; 1428 value |= SOR_DP_PADCTL_PAD_CAL_PD;
1435 tegra_sor_writel(sor, value, SOR_DP_PADCTL0); 1429 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
1436 1430
1437 if (sor->dpaux) { 1431 err = drm_dp_link_probe(sor->aux, &link);
1438 u8 rate, lanes; 1432 if (err < 0)
1439 1433 dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
1440 err = drm_dp_link_probe(aux, &link);
1441 if (err < 0)
1442 dev_err(sor->dev, "failed to probe eDP link: %d\n",
1443 err);
1444 1434
1445 err = drm_dp_link_power_up(aux, &link); 1435 err = drm_dp_link_power_up(sor->aux, &link);
1446 if (err < 0) 1436 if (err < 0)
1447 dev_err(sor->dev, "failed to power up eDP link: %d\n", 1437 dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
1448 err);
1449 1438
1450 err = drm_dp_link_configure(aux, &link); 1439 err = drm_dp_link_configure(sor->aux, &link);
1451 if (err < 0) 1440 if (err < 0)
1452 dev_err(sor->dev, "failed to configure eDP link: %d\n", 1441 dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
1453 err);
1454 1442
1455 rate = drm_dp_link_rate_to_bw_code(link.rate); 1443 rate = drm_dp_link_rate_to_bw_code(link.rate);
1456 lanes = link.num_lanes; 1444 lanes = link.num_lanes;
1457 1445
1458 value = tegra_sor_readl(sor, SOR_CLK_CNTRL); 1446 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
1459 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; 1447 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
1460 value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); 1448 value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
1461 tegra_sor_writel(sor, value, SOR_CLK_CNTRL); 1449 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
1462 1450
1463 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); 1451 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
1464 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; 1452 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
1465 value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); 1453 value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
1466 1454
1467 if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) 1455 if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
1468 value |= SOR_DP_LINKCTL_ENHANCED_FRAME; 1456 value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
1469 1457
1470 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); 1458 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
1471 1459
1472 /* disable training pattern generator */ 1460 /* disable training pattern generator */
1473 1461
1474 for (i = 0; i < link.num_lanes; i++) { 1462 for (i = 0; i < link.num_lanes; i++) {
1475 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | 1463 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
1476 SOR_DP_TPG_SCRAMBLER_GALIOS | 1464 SOR_DP_TPG_SCRAMBLER_GALIOS |
1477 SOR_DP_TPG_PATTERN_NONE; 1465 SOR_DP_TPG_PATTERN_NONE;
1478 value = (value << 8) | lane; 1466 value = (value << 8) | lane;
1479 } 1467 }
1480 1468
1481 tegra_sor_writel(sor, value, SOR_DP_TPG); 1469 tegra_sor_writel(sor, value, SOR_DP_TPG);
1482 1470
1483 err = tegra_sor_dp_train_fast(sor, &link); 1471 err = tegra_sor_dp_train_fast(sor, &link);
1484 if (err < 0) { 1472 if (err < 0)
1485 dev_err(sor->dev, "DP fast link training failed: %d\n", 1473 dev_err(sor->dev, "DP fast link training failed: %d\n", err);
1486 err);
1487 }
1488 1474
1489 dev_dbg(sor->dev, "fast link training succeeded\n"); 1475 dev_dbg(sor->dev, "fast link training succeeded\n");
1490 }
1491 1476
1492 err = tegra_sor_power_up(sor, 250); 1477 err = tegra_sor_power_up(sor, 250);
1493 if (err < 0) 1478 if (err < 0)
@@ -1961,9 +1946,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
1961 1946
1962 /* production settings */ 1947 /* production settings */
1963 settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000); 1948 settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
1964 if (IS_ERR(settings)) { 1949 if (!settings) {
1965 dev_err(sor->dev, "no settings for pixel clock %d Hz: %ld\n", 1950 dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
1966 mode->clock * 1000, PTR_ERR(settings)); 1951 mode->clock * 1000);
1967 return; 1952 return;
1968 } 1953 }
1969 1954
@@ -2148,7 +2133,7 @@ static int tegra_sor_init(struct host1x_client *client)
2148 int encoder = DRM_MODE_ENCODER_NONE; 2133 int encoder = DRM_MODE_ENCODER_NONE;
2149 int err; 2134 int err;
2150 2135
2151 if (!sor->dpaux) { 2136 if (!sor->aux) {
2152 if (sor->soc->supports_hdmi) { 2137 if (sor->soc->supports_hdmi) {
2153 connector = DRM_MODE_CONNECTOR_HDMIA; 2138 connector = DRM_MODE_CONNECTOR_HDMIA;
2154 encoder = DRM_MODE_ENCODER_TMDS; 2139 encoder = DRM_MODE_ENCODER_TMDS;
@@ -2178,7 +2163,7 @@ static int tegra_sor_init(struct host1x_client *client)
2178 sor->output.connector.dpms = DRM_MODE_DPMS_OFF; 2163 sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
2179 2164
2180 drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, 2165 drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
2181 encoder); 2166 encoder, NULL);
2182 drm_encoder_helper_add(&sor->output.encoder, helpers); 2167 drm_encoder_helper_add(&sor->output.encoder, helpers);
2183 2168
2184 drm_mode_connector_attach_encoder(&sor->output.connector, 2169 drm_mode_connector_attach_encoder(&sor->output.connector,
@@ -2199,8 +2184,8 @@ static int tegra_sor_init(struct host1x_client *client)
2199 dev_err(sor->dev, "debugfs setup failed: %d\n", err); 2184 dev_err(sor->dev, "debugfs setup failed: %d\n", err);
2200 } 2185 }
2201 2186
2202 if (sor->dpaux) { 2187 if (sor->aux) {
2203 err = tegra_dpaux_attach(sor->dpaux, &sor->output); 2188 err = drm_dp_aux_attach(sor->aux, &sor->output);
2204 if (err < 0) { 2189 if (err < 0) {
2205 dev_err(sor->dev, "failed to attach DP: %d\n", err); 2190 dev_err(sor->dev, "failed to attach DP: %d\n", err);
2206 return err; 2191 return err;
@@ -2249,8 +2234,8 @@ static int tegra_sor_exit(struct host1x_client *client)
2249 2234
2250 tegra_output_exit(&sor->output); 2235 tegra_output_exit(&sor->output);
2251 2236
2252 if (sor->dpaux) { 2237 if (sor->aux) {
2253 err = tegra_dpaux_detach(sor->dpaux); 2238 err = drm_dp_aux_detach(sor->aux);
2254 if (err < 0) { 2239 if (err < 0) {
2255 dev_err(sor->dev, "failed to detach DP: %d\n", err); 2240 dev_err(sor->dev, "failed to detach DP: %d\n", err);
2256 return err; 2241 return err;
@@ -2399,14 +2384,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
2399 2384
2400 np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0); 2385 np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
2401 if (np) { 2386 if (np) {
2402 sor->dpaux = tegra_dpaux_find_by_of_node(np); 2387 sor->aux = drm_dp_aux_find_by_of_node(np);
2403 of_node_put(np); 2388 of_node_put(np);
2404 2389
2405 if (!sor->dpaux) 2390 if (!sor->aux)
2406 return -EPROBE_DEFER; 2391 return -EPROBE_DEFER;
2407 } 2392 }
2408 2393
2409 if (!sor->dpaux) { 2394 if (!sor->aux) {
2410 if (sor->soc->supports_hdmi) { 2395 if (sor->soc->supports_hdmi) {
2411 sor->ops = &tegra_sor_hdmi_ops; 2396 sor->ops = &tegra_sor_hdmi_ops;
2412 } else if (sor->soc->supports_lvds) { 2397 } else if (sor->soc->supports_lvds) {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 4ddb21e7f52f..d7f5b897c6c5 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -294,6 +294,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
294 break; 294 break;
295 } 295 }
296 296
297 drm_helper_disable_unused_functions(dev);
297 priv->fbdev = drm_fbdev_cma_init(dev, bpp, 298 priv->fbdev = drm_fbdev_cma_init(dev, bpp,
298 dev->mode_config.num_crtc, 299 dev->mode_config.num_crtc,
299 dev->mode_config.num_connector); 300 dev->mode_config.num_connector);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 0af8bed7ce1e..4dda6e2f464b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -128,7 +128,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
128 encoder->possible_crtcs = 1; 128 encoder->possible_crtcs = 1;
129 129
130 ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs, 130 ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
131 DRM_MODE_ENCODER_LVDS); 131 DRM_MODE_ENCODER_LVDS, NULL);
132 if (ret < 0) 132 if (ret < 0)
133 goto fail; 133 goto fail;
134 134
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 354c47ca6374..5052a8af7ecb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -138,7 +138,7 @@ static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
138 encoder->possible_crtcs = 1; 138 encoder->possible_crtcs = 1;
139 139
140 ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs, 140 ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
141 DRM_MODE_ENCODER_TMDS); 141 DRM_MODE_ENCODER_TMDS, NULL);
142 if (ret < 0) 142 if (ret < 0)
143 goto fail; 143 goto fail;
144 144
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 745e996d2dbc..4cbf26555093 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -176,7 +176,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
176 list_add_tail(&bo->lru, &man->lru); 176 list_add_tail(&bo->lru, &man->lru);
177 kref_get(&bo->list_kref); 177 kref_get(&bo->list_kref);
178 178
179 if (bo->ttm != NULL) { 179 if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
180 list_add_tail(&bo->swap, &bo->glob->swap_lru); 180 list_add_tail(&bo->swap, &bo->glob->swap_lru);
181 kref_get(&bo->list_kref); 181 kref_get(&bo->list_kref);
182 } 182 }
@@ -228,6 +228,27 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
228} 228}
229EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); 229EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
230 230
231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
232{
233 struct ttm_bo_device *bdev = bo->bdev;
234 struct ttm_mem_type_manager *man;
235
236 lockdep_assert_held(&bo->resv->lock.base);
237
238 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
239 list_del_init(&bo->swap);
240 list_del_init(&bo->lru);
241
242 } else {
243 if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
244 list_move_tail(&bo->swap, &bo->glob->swap_lru);
245
246 man = &bdev->man[bo->mem.mem_type];
247 list_move_tail(&bo->lru, &man->lru);
248 }
249}
250EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
251
231/* 252/*
232 * Call bo->mutex locked. 253 * Call bo->mutex locked.
233 */ 254 */
@@ -1170,9 +1191,15 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1170 if (likely(!ret)) 1191 if (likely(!ret))
1171 ret = ttm_bo_validate(bo, placement, interruptible, false); 1192 ret = ttm_bo_validate(bo, placement, interruptible, false);
1172 1193
1173 if (!resv) 1194 if (!resv) {
1174 ttm_bo_unreserve(bo); 1195 ttm_bo_unreserve(bo);
1175 1196
1197 } else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1198 spin_lock(&bo->glob->lru_lock);
1199 ttm_bo_add_to_lru(bo);
1200 spin_unlock(&bo->glob->lru_lock);
1201 }
1202
1176 if (unlikely(ret)) 1203 if (unlikely(ret))
1177 ttm_bo_unref(&bo); 1204 ttm_bo_unref(&bo);
1178 1205
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 6a954544727f..f154fb1929bd 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -180,7 +180,7 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
180 spin_unlock(&lock->lock); 180 spin_unlock(&lock->lock);
181 } 181 }
182 } else 182 } else
183 wait_event(lock->queue, __ttm_read_lock(lock)); 183 wait_event(lock->queue, __ttm_write_lock(lock));
184 184
185 return ret; 185 return ret;
186} 186}
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 0110d95522f3..4709b54c204c 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -122,13 +122,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
122 kfree(connector); 122 kfree(connector);
123} 123}
124 124
125static struct drm_connector_helper_funcs udl_connector_helper_funcs = { 125static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
126 .get_modes = udl_get_modes, 126 .get_modes = udl_get_modes,
127 .mode_valid = udl_mode_valid, 127 .mode_valid = udl_mode_valid,
128 .best_encoder = udl_best_single_encoder, 128 .best_encoder = udl_best_single_encoder,
129}; 129};
130 130
131static struct drm_connector_funcs udl_connector_funcs = { 131static const struct drm_connector_funcs udl_connector_funcs = {
132 .dpms = drm_helper_connector_dpms, 132 .dpms = drm_helper_connector_dpms,
133 .detect = udl_detect, 133 .detect = udl_detect,
134 .fill_modes = drm_helper_probe_single_connector_modes, 134 .fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 4052c4656498..a181a647fcf9 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -73,7 +73,8 @@ struct drm_encoder *udl_encoder_init(struct drm_device *dev)
73 if (!encoder) 73 if (!encoder)
74 return NULL; 74 return NULL;
75 75
76 drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS); 76 drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS,
77 NULL);
77 drm_encoder_helper_add(encoder, &udl_helper_funcs); 78 drm_encoder_helper_add(encoder, &udl_helper_funcs);
78 encoder->possible_crtcs = 1; 79 encoder->possible_crtcs = 1;
79 return encoder; 80 return encoder;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 677190a65e82..160ef2a08b89 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -400,7 +400,7 @@ static void udl_crtc_commit(struct drm_crtc *crtc)
400 udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 400 udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
401} 401}
402 402
403static struct drm_crtc_helper_funcs udl_helper_funcs = { 403static const struct drm_crtc_helper_funcs udl_helper_funcs = {
404 .dpms = udl_crtc_dpms, 404 .dpms = udl_crtc_dpms,
405 .mode_fixup = udl_crtc_mode_fixup, 405 .mode_fixup = udl_crtc_mode_fixup,
406 .mode_set = udl_crtc_mode_set, 406 .mode_set = udl_crtc_mode_set,
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 32b4f9cd8f52..4c6a99f0398c 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -8,10 +8,19 @@ vc4-y := \
8 vc4_crtc.o \ 8 vc4_crtc.o \
9 vc4_drv.o \ 9 vc4_drv.o \
10 vc4_kms.o \ 10 vc4_kms.o \
11 vc4_gem.o \
11 vc4_hdmi.o \ 12 vc4_hdmi.o \
12 vc4_hvs.o \ 13 vc4_hvs.o \
13 vc4_plane.o 14 vc4_irq.o \
15 vc4_plane.o \
16 vc4_render_cl.o \
17 vc4_trace_points.o \
18 vc4_v3d.o \
19 vc4_validate.o \
20 vc4_validate_shaders.o
14 21
15vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o 22vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
16 23
17obj-$(CONFIG_DRM_VC4) += vc4.o 24obj-$(CONFIG_DRM_VC4) += vc4.o
25
26CFLAGS_vc4_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index ab9f5108ae1a..18dfe3ec9a62 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -12,19 +12,236 @@
12 * access to system memory with no MMU in between. To support it, we 12 * access to system memory with no MMU in between. To support it, we
13 * use the GEM CMA helper functions to allocate contiguous ranges of 13 * use the GEM CMA helper functions to allocate contiguous ranges of
14 * physical memory for our BOs. 14 * physical memory for our BOs.
15 *
16 * Since the CMA allocator is very slow, we keep a cache of recently
17 * freed BOs around so that the kernel's allocation of objects for 3D
18 * rendering can return quickly.
15 */ 19 */
16 20
17#include "vc4_drv.h" 21#include "vc4_drv.h"
22#include "uapi/drm/vc4_drm.h"
23
24static void vc4_bo_stats_dump(struct vc4_dev *vc4)
25{
26 DRM_INFO("num bos allocated: %d\n",
27 vc4->bo_stats.num_allocated);
28 DRM_INFO("size bos allocated: %dkb\n",
29 vc4->bo_stats.size_allocated / 1024);
30 DRM_INFO("num bos used: %d\n",
31 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
32 DRM_INFO("size bos used: %dkb\n",
33 (vc4->bo_stats.size_allocated -
34 vc4->bo_stats.size_cached) / 1024);
35 DRM_INFO("num bos cached: %d\n",
36 vc4->bo_stats.num_cached);
37 DRM_INFO("size bos cached: %dkb\n",
38 vc4->bo_stats.size_cached / 1024);
39}
40
41#ifdef CONFIG_DEBUG_FS
42int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
43{
44 struct drm_info_node *node = (struct drm_info_node *)m->private;
45 struct drm_device *dev = node->minor->dev;
46 struct vc4_dev *vc4 = to_vc4_dev(dev);
47 struct vc4_bo_stats stats;
48
49 /* Take a snapshot of the current stats with the lock held. */
50 mutex_lock(&vc4->bo_lock);
51 stats = vc4->bo_stats;
52 mutex_unlock(&vc4->bo_lock);
53
54 seq_printf(m, "num bos allocated: %d\n",
55 stats.num_allocated);
56 seq_printf(m, "size bos allocated: %dkb\n",
57 stats.size_allocated / 1024);
58 seq_printf(m, "num bos used: %d\n",
59 stats.num_allocated - stats.num_cached);
60 seq_printf(m, "size bos used: %dkb\n",
61 (stats.size_allocated - stats.size_cached) / 1024);
62 seq_printf(m, "num bos cached: %d\n",
63 stats.num_cached);
64 seq_printf(m, "size bos cached: %dkb\n",
65 stats.size_cached / 1024);
66
67 return 0;
68}
69#endif
70
71static uint32_t bo_page_index(size_t size)
72{
73 return (size / PAGE_SIZE) - 1;
74}
75
76/* Must be called with bo_lock held. */
77static void vc4_bo_destroy(struct vc4_bo *bo)
78{
79 struct drm_gem_object *obj = &bo->base.base;
80 struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
81
82 if (bo->validated_shader) {
83 kfree(bo->validated_shader->texture_samples);
84 kfree(bo->validated_shader);
85 bo->validated_shader = NULL;
86 }
87
88 vc4->bo_stats.num_allocated--;
89 vc4->bo_stats.size_allocated -= obj->size;
90 drm_gem_cma_free_object(obj);
91}
92
93/* Must be called with bo_lock held. */
94static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
95{
96 struct drm_gem_object *obj = &bo->base.base;
97 struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
98
99 vc4->bo_stats.num_cached--;
100 vc4->bo_stats.size_cached -= obj->size;
101
102 list_del(&bo->unref_head);
103 list_del(&bo->size_head);
104}
105
106static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
107 size_t size)
108{
109 struct vc4_dev *vc4 = to_vc4_dev(dev);
110 uint32_t page_index = bo_page_index(size);
111
112 if (vc4->bo_cache.size_list_size <= page_index) {
113 uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
114 page_index + 1);
115 struct list_head *new_list;
116 uint32_t i;
117
118 new_list = kmalloc_array(new_size, sizeof(struct list_head),
119 GFP_KERNEL);
120 if (!new_list)
121 return NULL;
122
123 /* Rebase the old cached BO lists to their new list
124 * head locations.
125 */
126 for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
127 struct list_head *old_list =
128 &vc4->bo_cache.size_list[i];
129
130 if (list_empty(old_list))
131 INIT_LIST_HEAD(&new_list[i]);
132 else
133 list_replace(old_list, &new_list[i]);
134 }
135 /* And initialize the brand new BO list heads. */
136 for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
137 INIT_LIST_HEAD(&new_list[i]);
138
139 kfree(vc4->bo_cache.size_list);
140 vc4->bo_cache.size_list = new_list;
141 vc4->bo_cache.size_list_size = new_size;
142 }
143
144 return &vc4->bo_cache.size_list[page_index];
145}
146
147void vc4_bo_cache_purge(struct drm_device *dev)
148{
149 struct vc4_dev *vc4 = to_vc4_dev(dev);
150
151 mutex_lock(&vc4->bo_lock);
152 while (!list_empty(&vc4->bo_cache.time_list)) {
153 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
154 struct vc4_bo, unref_head);
155 vc4_bo_remove_from_cache(bo);
156 vc4_bo_destroy(bo);
157 }
158 mutex_unlock(&vc4->bo_lock);
159}
160
161static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
162 uint32_t size)
163{
164 struct vc4_dev *vc4 = to_vc4_dev(dev);
165 uint32_t page_index = bo_page_index(size);
166 struct vc4_bo *bo = NULL;
167
168 size = roundup(size, PAGE_SIZE);
169
170 mutex_lock(&vc4->bo_lock);
171 if (page_index >= vc4->bo_cache.size_list_size)
172 goto out;
18 173
19struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size) 174 if (list_empty(&vc4->bo_cache.size_list[page_index]))
175 goto out;
176
177 bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
178 struct vc4_bo, size_head);
179 vc4_bo_remove_from_cache(bo);
180 kref_init(&bo->base.base.refcount);
181
182out:
183 mutex_unlock(&vc4->bo_lock);
184 return bo;
185}
186
187/**
188 * vc4_gem_create_object - Implementation of driver->gem_create_object.
189 *
190 * This lets the CMA helpers allocate object structs for us, and keep
191 * our BO stats correct.
192 */
193struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
20{ 194{
195 struct vc4_dev *vc4 = to_vc4_dev(dev);
196 struct vc4_bo *bo;
197
198 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
199 if (!bo)
200 return ERR_PTR(-ENOMEM);
201
202 mutex_lock(&vc4->bo_lock);
203 vc4->bo_stats.num_allocated++;
204 vc4->bo_stats.size_allocated += size;
205 mutex_unlock(&vc4->bo_lock);
206
207 return &bo->base.base;
208}
209
210struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
211 bool from_cache)
212{
213 size_t size = roundup(unaligned_size, PAGE_SIZE);
214 struct vc4_dev *vc4 = to_vc4_dev(dev);
21 struct drm_gem_cma_object *cma_obj; 215 struct drm_gem_cma_object *cma_obj;
22 216
23 cma_obj = drm_gem_cma_create(dev, size); 217 if (size == 0)
24 if (IS_ERR(cma_obj))
25 return NULL; 218 return NULL;
26 else 219
27 return to_vc4_bo(&cma_obj->base); 220 /* First, try to get a vc4_bo from the kernel BO cache. */
221 if (from_cache) {
222 struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size);
223
224 if (bo)
225 return bo;
226 }
227
228 cma_obj = drm_gem_cma_create(dev, size);
229 if (IS_ERR(cma_obj)) {
230 /*
231 * If we've run out of CMA memory, kill the cache of
232 * CMA allocations we've got laying around and try again.
233 */
234 vc4_bo_cache_purge(dev);
235
236 cma_obj = drm_gem_cma_create(dev, size);
237 if (IS_ERR(cma_obj)) {
238 DRM_ERROR("Failed to allocate from CMA:\n");
239 vc4_bo_stats_dump(vc4);
240 return NULL;
241 }
242 }
243
244 return to_vc4_bo(&cma_obj->base);
28} 245}
29 246
30int vc4_dumb_create(struct drm_file *file_priv, 247int vc4_dumb_create(struct drm_file *file_priv,
@@ -41,7 +258,191 @@ int vc4_dumb_create(struct drm_file *file_priv,
41 if (args->size < args->pitch * args->height) 258 if (args->size < args->pitch * args->height)
42 args->size = args->pitch * args->height; 259 args->size = args->pitch * args->height;
43 260
44 bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE)); 261 bo = vc4_bo_create(dev, args->size, false);
262 if (!bo)
263 return -ENOMEM;
264
265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
266 drm_gem_object_unreference_unlocked(&bo->base.base);
267
268 return ret;
269}
270
271/* Must be called with bo_lock held. */
272static void vc4_bo_cache_free_old(struct drm_device *dev)
273{
274 struct vc4_dev *vc4 = to_vc4_dev(dev);
275 unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
276
277 while (!list_empty(&vc4->bo_cache.time_list)) {
278 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
279 struct vc4_bo, unref_head);
280 if (time_before(expire_time, bo->free_time)) {
281 mod_timer(&vc4->bo_cache.time_timer,
282 round_jiffies_up(jiffies +
283 msecs_to_jiffies(1000)));
284 return;
285 }
286
287 vc4_bo_remove_from_cache(bo);
288 vc4_bo_destroy(bo);
289 }
290}
291
292/* Called on the last userspace/kernel unreference of the BO. Returns
293 * it to the BO cache if possible, otherwise frees it.
294 *
295 * Note that this is called with the struct_mutex held.
296 */
297void vc4_free_object(struct drm_gem_object *gem_bo)
298{
299 struct drm_device *dev = gem_bo->dev;
300 struct vc4_dev *vc4 = to_vc4_dev(dev);
301 struct vc4_bo *bo = to_vc4_bo(gem_bo);
302 struct list_head *cache_list;
303
304 mutex_lock(&vc4->bo_lock);
305 /* If the object references someone else's memory, we can't cache it.
306 */
307 if (gem_bo->import_attach) {
308 vc4_bo_destroy(bo);
309 goto out;
310 }
311
312 /* Don't cache if it was publicly named. */
313 if (gem_bo->name) {
314 vc4_bo_destroy(bo);
315 goto out;
316 }
317
318 cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
319 if (!cache_list) {
320 vc4_bo_destroy(bo);
321 goto out;
322 }
323
324 if (bo->validated_shader) {
325 kfree(bo->validated_shader->texture_samples);
326 kfree(bo->validated_shader);
327 bo->validated_shader = NULL;
328 }
329
330 bo->free_time = jiffies;
331 list_add(&bo->size_head, cache_list);
332 list_add(&bo->unref_head, &vc4->bo_cache.time_list);
333
334 vc4->bo_stats.num_cached++;
335 vc4->bo_stats.size_cached += gem_bo->size;
336
337 vc4_bo_cache_free_old(dev);
338
339out:
340 mutex_unlock(&vc4->bo_lock);
341}
342
343static void vc4_bo_cache_time_work(struct work_struct *work)
344{
345 struct vc4_dev *vc4 =
346 container_of(work, struct vc4_dev, bo_cache.time_work);
347 struct drm_device *dev = vc4->dev;
348
349 mutex_lock(&vc4->bo_lock);
350 vc4_bo_cache_free_old(dev);
351 mutex_unlock(&vc4->bo_lock);
352}
353
354static void vc4_bo_cache_time_timer(unsigned long data)
355{
356 struct drm_device *dev = (struct drm_device *)data;
357 struct vc4_dev *vc4 = to_vc4_dev(dev);
358
359 schedule_work(&vc4->bo_cache.time_work);
360}
361
362struct dma_buf *
363vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
364{
365 struct vc4_bo *bo = to_vc4_bo(obj);
366
367 if (bo->validated_shader) {
368 DRM_ERROR("Attempting to export shader BO\n");
369 return ERR_PTR(-EINVAL);
370 }
371
372 return drm_gem_prime_export(dev, obj, flags);
373}
374
375int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
376{
377 struct drm_gem_object *gem_obj;
378 struct vc4_bo *bo;
379 int ret;
380
381 ret = drm_gem_mmap(filp, vma);
382 if (ret)
383 return ret;
384
385 gem_obj = vma->vm_private_data;
386 bo = to_vc4_bo(gem_obj);
387
388 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
389 DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
390 return -EINVAL;
391 }
392
393 /*
394 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
395 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
396 * the whole buffer.
397 */
398 vma->vm_flags &= ~VM_PFNMAP;
399 vma->vm_pgoff = 0;
400
401 ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
402 bo->base.vaddr, bo->base.paddr,
403 vma->vm_end - vma->vm_start);
404 if (ret)
405 drm_gem_vm_close(vma);
406
407 return ret;
408}
409
410int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
411{
412 struct vc4_bo *bo = to_vc4_bo(obj);
413
414 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
415 DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
416 return -EINVAL;
417 }
418
419 return drm_gem_cma_prime_mmap(obj, vma);
420}
421
422void *vc4_prime_vmap(struct drm_gem_object *obj)
423{
424 struct vc4_bo *bo = to_vc4_bo(obj);
425
426 if (bo->validated_shader) {
427 DRM_ERROR("mmaping of shader BOs not allowed.\n");
428 return ERR_PTR(-EINVAL);
429 }
430
431 return drm_gem_cma_prime_vmap(obj);
432}
433
434int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
435 struct drm_file *file_priv)
436{
437 struct drm_vc4_create_bo *args = data;
438 struct vc4_bo *bo = NULL;
439 int ret;
440
441 /*
442 * We can't allocate from the BO cache, because the BOs don't
443 * get zeroed, and that might leak data between users.
444 */
445 bo = vc4_bo_create(dev, args->size, false);
45 if (!bo) 446 if (!bo)
46 return -ENOMEM; 447 return -ENOMEM;
47 448
@@ -50,3 +451,107 @@ int vc4_dumb_create(struct drm_file *file_priv,
50 451
51 return ret; 452 return ret;
52} 453}
454
455int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
456 struct drm_file *file_priv)
457{
458 struct drm_vc4_mmap_bo *args = data;
459 struct drm_gem_object *gem_obj;
460
461 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
462 if (!gem_obj) {
463 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
464 return -EINVAL;
465 }
466
467 /* The mmap offset was set up at BO allocation time. */
468 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
469
470 drm_gem_object_unreference_unlocked(gem_obj);
471 return 0;
472}
473
474int
475vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
476 struct drm_file *file_priv)
477{
478 struct drm_vc4_create_shader_bo *args = data;
479 struct vc4_bo *bo = NULL;
480 int ret;
481
482 if (args->size == 0)
483 return -EINVAL;
484
485 if (args->size % sizeof(u64) != 0)
486 return -EINVAL;
487
488 if (args->flags != 0) {
489 DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
490 return -EINVAL;
491 }
492
493 if (args->pad != 0) {
494 DRM_INFO("Pad set: 0x%08x\n", args->pad);
495 return -EINVAL;
496 }
497
498 bo = vc4_bo_create(dev, args->size, true);
499 if (!bo)
500 return -ENOMEM;
501
502 ret = copy_from_user(bo->base.vaddr,
503 (void __user *)(uintptr_t)args->data,
504 args->size);
505 if (ret != 0)
506 goto fail;
507 /* Clear the rest of the memory from allocating from the BO
508 * cache.
509 */
510 memset(bo->base.vaddr + args->size, 0,
511 bo->base.base.size - args->size);
512
513 bo->validated_shader = vc4_validate_shader(&bo->base);
514 if (!bo->validated_shader) {
515 ret = -EINVAL;
516 goto fail;
517 }
518
519 /* We have to create the handle after validation, to avoid
520 * races for users to do doing things like mmap the shader BO.
521 */
522 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
523
524 fail:
525 drm_gem_object_unreference_unlocked(&bo->base.base);
526
527 return ret;
528}
529
530void vc4_bo_cache_init(struct drm_device *dev)
531{
532 struct vc4_dev *vc4 = to_vc4_dev(dev);
533
534 mutex_init(&vc4->bo_lock);
535
536 INIT_LIST_HEAD(&vc4->bo_cache.time_list);
537
538 INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
539 setup_timer(&vc4->bo_cache.time_timer,
540 vc4_bo_cache_time_timer,
541 (unsigned long)dev);
542}
543
544void vc4_bo_cache_destroy(struct drm_device *dev)
545{
546 struct vc4_dev *vc4 = to_vc4_dev(dev);
547
548 del_timer(&vc4->bo_cache.time_timer);
549 cancel_work_sync(&vc4->bo_cache.time_work);
550
551 vc4_bo_cache_purge(dev);
552
553 if (vc4->bo_stats.num_allocated) {
554 DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
555 vc4_bo_stats_dump(vc4);
556 }
557}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 265064c62d49..018145e0b87d 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
35#include "drm_atomic_helper.h" 35#include "drm_atomic_helper.h"
36#include "drm_crtc_helper.h" 36#include "drm_crtc_helper.h"
37#include "linux/clk.h" 37#include "linux/clk.h"
38#include "drm_fb_cma_helper.h"
38#include "linux/component.h" 39#include "linux/component.h"
39#include "linux/of_device.h" 40#include "linux/of_device.h"
40#include "vc4_drv.h" 41#include "vc4_drv.h"
@@ -327,7 +328,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
327 /* The pixelvalve can only feed one encoder (and encoders are 328 /* The pixelvalve can only feed one encoder (and encoders are
328 * 1:1 with connectors.) 329 * 1:1 with connectors.)
329 */ 330 */
330 if (drm_atomic_connectors_for_crtc(state->state, crtc) > 1) 331 if (hweight32(state->connector_mask) > 1)
331 return -EINVAL; 332 return -EINVAL;
332 333
333 drm_atomic_crtc_state_for_each_plane(plane, state) { 334 drm_atomic_crtc_state_for_each_plane(plane, state) {
@@ -476,10 +477,106 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
476 return ret; 477 return ret;
477} 478}
478 479
480struct vc4_async_flip_state {
481 struct drm_crtc *crtc;
482 struct drm_framebuffer *fb;
483 struct drm_pending_vblank_event *event;
484
485 struct vc4_seqno_cb cb;
486};
487
488/* Called when the V3D execution for the BO being flipped to is done, so that
489 * we can actually update the plane's address to point to it.
490 */
491static void
492vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
493{
494 struct vc4_async_flip_state *flip_state =
495 container_of(cb, struct vc4_async_flip_state, cb);
496 struct drm_crtc *crtc = flip_state->crtc;
497 struct drm_device *dev = crtc->dev;
498 struct vc4_dev *vc4 = to_vc4_dev(dev);
499 struct drm_plane *plane = crtc->primary;
500
501 vc4_plane_async_set_fb(plane, flip_state->fb);
502 if (flip_state->event) {
503 unsigned long flags;
504
505 spin_lock_irqsave(&dev->event_lock, flags);
506 drm_crtc_send_vblank_event(crtc, flip_state->event);
507 spin_unlock_irqrestore(&dev->event_lock, flags);
508 }
509
510 drm_framebuffer_unreference(flip_state->fb);
511 kfree(flip_state);
512
513 up(&vc4->async_modeset);
514}
515
516/* Implements async (non-vblank-synced) page flips.
517 *
518 * The page flip ioctl needs to return immediately, so we grab the
519 * modeset semaphore on the pipe, and queue the address update for
520 * when V3D is done with the BO being flipped to.
521 */
522static int vc4_async_page_flip(struct drm_crtc *crtc,
523 struct drm_framebuffer *fb,
524 struct drm_pending_vblank_event *event,
525 uint32_t flags)
526{
527 struct drm_device *dev = crtc->dev;
528 struct vc4_dev *vc4 = to_vc4_dev(dev);
529 struct drm_plane *plane = crtc->primary;
530 int ret = 0;
531 struct vc4_async_flip_state *flip_state;
532 struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
533 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
534
535 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
536 if (!flip_state)
537 return -ENOMEM;
538
539 drm_framebuffer_reference(fb);
540 flip_state->fb = fb;
541 flip_state->crtc = crtc;
542 flip_state->event = event;
543
544 /* Make sure all other async modesetes have landed. */
545 ret = down_interruptible(&vc4->async_modeset);
546 if (ret) {
547 kfree(flip_state);
548 return ret;
549 }
550
551 /* Immediately update the plane's legacy fb pointer, so that later
552 * modeset prep sees the state that will be present when the semaphore
553 * is released.
554 */
555 drm_atomic_set_fb_for_plane(plane->state, fb);
556 plane->fb = fb;
557
558 vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
559 vc4_async_page_flip_complete);
560
561 /* Driver takes ownership of state on successful async commit. */
562 return 0;
563}
564
565static int vc4_page_flip(struct drm_crtc *crtc,
566 struct drm_framebuffer *fb,
567 struct drm_pending_vblank_event *event,
568 uint32_t flags)
569{
570 if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
571 return vc4_async_page_flip(crtc, fb, event, flags);
572 else
573 return drm_atomic_helper_page_flip(crtc, fb, event, flags);
574}
575
479static const struct drm_crtc_funcs vc4_crtc_funcs = { 576static const struct drm_crtc_funcs vc4_crtc_funcs = {
480 .set_config = drm_atomic_helper_set_config, 577 .set_config = drm_atomic_helper_set_config,
481 .destroy = vc4_crtc_destroy, 578 .destroy = vc4_crtc_destroy,
482 .page_flip = drm_atomic_helper_page_flip, 579 .page_flip = vc4_page_flip,
483 .set_property = NULL, 580 .set_property = NULL,
484 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 581 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
485 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 582 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
@@ -606,7 +703,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
606 } 703 }
607 704
608 drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane, 705 drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane,
609 &vc4_crtc_funcs); 706 &vc4_crtc_funcs, NULL);
610 drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs); 707 drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
611 primary_plane->crtc = crtc; 708 primary_plane->crtc = crtc;
612 cursor_plane->crtc = crtc; 709 cursor_plane->crtc = crtc;
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 4297b0a5b74e..d76ad10b07fd 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -16,11 +16,14 @@
16#include "vc4_regs.h" 16#include "vc4_regs.h"
17 17
18static const struct drm_info_list vc4_debugfs_list[] = { 18static const struct drm_info_list vc4_debugfs_list[] = {
19 {"bo_stats", vc4_bo_stats_debugfs, 0},
19 {"hdmi_regs", vc4_hdmi_debugfs_regs, 0}, 20 {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
20 {"hvs_regs", vc4_hvs_debugfs_regs, 0}, 21 {"hvs_regs", vc4_hvs_debugfs_regs, 0},
21 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, 22 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
22 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, 23 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
23 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2}, 24 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
25 {"v3d_ident", vc4_v3d_debugfs_ident, 0},
26 {"v3d_regs", vc4_v3d_debugfs_regs, 0},
24}; 27};
25 28
26#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list) 29#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index d5db9e0f3b73..f1655fff8425 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -16,6 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include "drm_fb_cma_helper.h" 17#include "drm_fb_cma_helper.h"
18 18
19#include "uapi/drm/vc4_drm.h"
19#include "vc4_drv.h" 20#include "vc4_drv.h"
20#include "vc4_regs.h" 21#include "vc4_regs.h"
21 22
@@ -63,7 +64,7 @@ static const struct file_operations vc4_drm_fops = {
63 .open = drm_open, 64 .open = drm_open,
64 .release = drm_release, 65 .release = drm_release,
65 .unlocked_ioctl = drm_ioctl, 66 .unlocked_ioctl = drm_ioctl,
66 .mmap = drm_gem_cma_mmap, 67 .mmap = vc4_mmap,
67 .poll = drm_poll, 68 .poll = drm_poll,
68 .read = drm_read, 69 .read = drm_read,
69#ifdef CONFIG_COMPAT 70#ifdef CONFIG_COMPAT
@@ -73,16 +74,30 @@ static const struct file_operations vc4_drm_fops = {
73}; 74};
74 75
75static const struct drm_ioctl_desc vc4_drm_ioctls[] = { 76static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
77 DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
78 DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
79 DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
80 DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
81 DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
82 DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
83 DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
84 DRM_ROOT_ONLY),
76}; 85};
77 86
78static struct drm_driver vc4_drm_driver = { 87static struct drm_driver vc4_drm_driver = {
79 .driver_features = (DRIVER_MODESET | 88 .driver_features = (DRIVER_MODESET |
80 DRIVER_ATOMIC | 89 DRIVER_ATOMIC |
81 DRIVER_GEM | 90 DRIVER_GEM |
91 DRIVER_HAVE_IRQ |
82 DRIVER_PRIME), 92 DRIVER_PRIME),
83 .lastclose = vc4_lastclose, 93 .lastclose = vc4_lastclose,
84 .preclose = vc4_drm_preclose, 94 .preclose = vc4_drm_preclose,
85 95
96 .irq_handler = vc4_irq,
97 .irq_preinstall = vc4_irq_preinstall,
98 .irq_postinstall = vc4_irq_postinstall,
99 .irq_uninstall = vc4_irq_uninstall,
100
86 .enable_vblank = vc4_enable_vblank, 101 .enable_vblank = vc4_enable_vblank,
87 .disable_vblank = vc4_disable_vblank, 102 .disable_vblank = vc4_disable_vblank,
88 .get_vblank_counter = drm_vblank_count, 103 .get_vblank_counter = drm_vblank_count,
@@ -92,18 +107,19 @@ static struct drm_driver vc4_drm_driver = {
92 .debugfs_cleanup = vc4_debugfs_cleanup, 107 .debugfs_cleanup = vc4_debugfs_cleanup,
93#endif 108#endif
94 109
95 .gem_free_object = drm_gem_cma_free_object, 110 .gem_create_object = vc4_create_object,
111 .gem_free_object = vc4_free_object,
96 .gem_vm_ops = &drm_gem_cma_vm_ops, 112 .gem_vm_ops = &drm_gem_cma_vm_ops,
97 113
98 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 114 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
99 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 115 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
100 .gem_prime_import = drm_gem_prime_import, 116 .gem_prime_import = drm_gem_prime_import,
101 .gem_prime_export = drm_gem_prime_export, 117 .gem_prime_export = vc4_prime_export,
102 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, 118 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
103 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, 119 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
104 .gem_prime_vmap = drm_gem_cma_prime_vmap, 120 .gem_prime_vmap = vc4_prime_vmap,
105 .gem_prime_vunmap = drm_gem_cma_prime_vunmap, 121 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
106 .gem_prime_mmap = drm_gem_cma_prime_mmap, 122 .gem_prime_mmap = vc4_prime_mmap,
107 123
108 .dumb_create = vc4_dumb_create, 124 .dumb_create = vc4_dumb_create,
109 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 125 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -168,15 +184,17 @@ static int vc4_drm_bind(struct device *dev)
168 vc4->dev = drm; 184 vc4->dev = drm;
169 drm->dev_private = vc4; 185 drm->dev_private = vc4;
170 186
171 drm_dev_set_unique(drm, dev_name(dev)); 187 vc4_bo_cache_init(drm);
172 188
173 drm_mode_config_init(drm); 189 drm_mode_config_init(drm);
174 if (ret) 190 if (ret)
175 goto unref; 191 goto unref;
176 192
193 vc4_gem_init(drm);
194
177 ret = component_bind_all(dev, drm); 195 ret = component_bind_all(dev, drm);
178 if (ret) 196 if (ret)
179 goto unref; 197 goto gem_destroy;
180 198
181 ret = drm_dev_register(drm, 0); 199 ret = drm_dev_register(drm, 0);
182 if (ret < 0) 200 if (ret < 0)
@@ -200,8 +218,11 @@ unregister:
200 drm_dev_unregister(drm); 218 drm_dev_unregister(drm);
201unbind_all: 219unbind_all:
202 component_unbind_all(dev, drm); 220 component_unbind_all(dev, drm);
221gem_destroy:
222 vc4_gem_destroy(drm);
203unref: 223unref:
204 drm_dev_unref(drm); 224 drm_dev_unref(drm);
225 vc4_bo_cache_destroy(drm);
205 return ret; 226 return ret;
206} 227}
207 228
@@ -228,6 +249,7 @@ static struct platform_driver *const component_drivers[] = {
228 &vc4_hdmi_driver, 249 &vc4_hdmi_driver,
229 &vc4_crtc_driver, 250 &vc4_crtc_driver,
230 &vc4_hvs_driver, 251 &vc4_hvs_driver,
252 &vc4_v3d_driver,
231}; 253};
232 254
233static int vc4_platform_drm_probe(struct platform_device *pdev) 255static int vc4_platform_drm_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index fd8319fa682e..080865ec2bae 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -15,8 +15,89 @@ struct vc4_dev {
15 struct vc4_hdmi *hdmi; 15 struct vc4_hdmi *hdmi;
16 struct vc4_hvs *hvs; 16 struct vc4_hvs *hvs;
17 struct vc4_crtc *crtc[3]; 17 struct vc4_crtc *crtc[3];
18 struct vc4_v3d *v3d;
18 19
19 struct drm_fbdev_cma *fbdev; 20 struct drm_fbdev_cma *fbdev;
21
22 struct vc4_hang_state *hang_state;
23
24 /* The kernel-space BO cache. Tracks buffers that have been
25 * unreferenced by all other users (refcounts of 0!) but not
26 * yet freed, so we can do cheap allocations.
27 */
28 struct vc4_bo_cache {
29 /* Array of list heads for entries in the BO cache,
30 * based on number of pages, so we can do O(1) lookups
31 * in the cache when allocating.
32 */
33 struct list_head *size_list;
34 uint32_t size_list_size;
35
36 /* List of all BOs in the cache, ordered by age, so we
37 * can do O(1) lookups when trying to free old
38 * buffers.
39 */
40 struct list_head time_list;
41 struct work_struct time_work;
42 struct timer_list time_timer;
43 } bo_cache;
44
45 struct vc4_bo_stats {
46 u32 num_allocated;
47 u32 size_allocated;
48 u32 num_cached;
49 u32 size_cached;
50 } bo_stats;
51
52 /* Protects bo_cache and the BO stats. */
53 struct mutex bo_lock;
54
55 /* Sequence number for the last job queued in job_list.
56 * Starts at 0 (no jobs emitted).
57 */
58 uint64_t emit_seqno;
59
60 /* Sequence number for the last completed job on the GPU.
61 * Starts at 0 (no jobs completed).
62 */
63 uint64_t finished_seqno;
64
65 /* List of all struct vc4_exec_info for jobs to be executed.
66 * The first job in the list is the one currently programmed
67 * into ct0ca/ct1ca for execution.
68 */
69 struct list_head job_list;
70 /* List of the finished vc4_exec_infos waiting to be freed by
71 * job_done_work.
72 */
73 struct list_head job_done_list;
74 /* Spinlock used to synchronize the job_list and seqno
75 * accesses between the IRQ handler and GEM ioctls.
76 */
77 spinlock_t job_lock;
78 wait_queue_head_t job_wait_queue;
79 struct work_struct job_done_work;
80
81 /* List of struct vc4_seqno_cb for callbacks to be made from a
82 * workqueue when the given seqno is passed.
83 */
84 struct list_head seqno_cb_list;
85
86 /* The binner overflow memory that's currently set up in
87 * BPOA/BPOS registers. When overflow occurs and a new one is
88 * allocated, the previous one will be moved to
89 * vc4->current_exec's free list.
90 */
91 struct vc4_bo *overflow_mem;
92 struct work_struct overflow_mem_work;
93
94 struct {
95 uint32_t last_ct0ca, last_ct1ca;
96 struct timer_list timer;
97 struct work_struct reset_work;
98 } hangcheck;
99
100 struct semaphore async_modeset;
20}; 101};
21 102
22static inline struct vc4_dev * 103static inline struct vc4_dev *
@@ -27,6 +108,25 @@ to_vc4_dev(struct drm_device *dev)
27 108
28struct vc4_bo { 109struct vc4_bo {
29 struct drm_gem_cma_object base; 110 struct drm_gem_cma_object base;
111
112 /* seqno of the last job to render to this BO. */
113 uint64_t seqno;
114
115 /* List entry for the BO's position in either
116 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
117 */
118 struct list_head unref_head;
119
120 /* Time in jiffies when the BO was put in vc4->bo_cache. */
121 unsigned long free_time;
122
123 /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
124 struct list_head size_head;
125
126 /* Struct for shader validation state, if created by
127 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
128 */
129 struct vc4_validated_shader_info *validated_shader;
30}; 130};
31 131
32static inline struct vc4_bo * 132static inline struct vc4_bo *
@@ -35,6 +135,17 @@ to_vc4_bo(struct drm_gem_object *bo)
35 return (struct vc4_bo *)bo; 135 return (struct vc4_bo *)bo;
36} 136}
37 137
138struct vc4_seqno_cb {
139 struct work_struct work;
140 uint64_t seqno;
141 void (*func)(struct vc4_seqno_cb *cb);
142};
143
144struct vc4_v3d {
145 struct platform_device *pdev;
146 void __iomem *regs;
147};
148
38struct vc4_hvs { 149struct vc4_hvs {
39 struct platform_device *pdev; 150 struct platform_device *pdev;
40 void __iomem *regs; 151 void __iomem *regs;
@@ -72,9 +183,142 @@ to_vc4_encoder(struct drm_encoder *encoder)
72 return container_of(encoder, struct vc4_encoder, base); 183 return container_of(encoder, struct vc4_encoder, base);
73} 184}
74 185
186#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
187#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
75#define HVS_READ(offset) readl(vc4->hvs->regs + offset) 188#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
76#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset) 189#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
77 190
191struct vc4_exec_info {
192 /* Sequence number for this bin/render job. */
193 uint64_t seqno;
194
195 /* Kernel-space copy of the ioctl arguments */
196 struct drm_vc4_submit_cl *args;
197
198 /* This is the array of BOs that were looked up at the start of exec.
199 * Command validation will use indices into this array.
200 */
201 struct drm_gem_cma_object **bo;
202 uint32_t bo_count;
203
204 /* Pointers for our position in vc4->job_list */
205 struct list_head head;
206
207 /* List of other BOs used in the job that need to be released
208 * once the job is complete.
209 */
210 struct list_head unref_list;
211
212 /* Current unvalidated indices into @bo loaded by the non-hardware
213 * VC4_PACKET_GEM_HANDLES.
214 */
215 uint32_t bo_index[2];
216
217 /* This is the BO where we store the validated command lists, shader
218 * records, and uniforms.
219 */
220 struct drm_gem_cma_object *exec_bo;
221
222 /**
223 * This tracks the per-shader-record state (packet 64) that
224 * determines the length of the shader record and the offset
225 * it's expected to be found at. It gets read in from the
226 * command lists.
227 */
228 struct vc4_shader_state {
229 uint32_t addr;
230 /* Maximum vertex index referenced by any primitive using this
231 * shader state.
232 */
233 uint32_t max_index;
234 } *shader_state;
235
236 /** How many shader states the user declared they were using. */
237 uint32_t shader_state_size;
238 /** How many shader state records the validator has seen. */
239 uint32_t shader_state_count;
240
241 bool found_tile_binning_mode_config_packet;
242 bool found_start_tile_binning_packet;
243 bool found_increment_semaphore_packet;
244 bool found_flush;
245 uint8_t bin_tiles_x, bin_tiles_y;
246 struct drm_gem_cma_object *tile_bo;
247 uint32_t tile_alloc_offset;
248
249 /**
250 * Computed addresses pointing into exec_bo where we start the
251 * bin thread (ct0) and render thread (ct1).
252 */
253 uint32_t ct0ca, ct0ea;
254 uint32_t ct1ca, ct1ea;
255
256 /* Pointer to the unvalidated bin CL (if present). */
257 void *bin_u;
258
259 /* Pointers to the shader recs. These paddr gets incremented as CL
260 * packets are relocated in validate_gl_shader_state, and the vaddrs
261 * (u and v) get incremented and size decremented as the shader recs
262 * themselves are validated.
263 */
264 void *shader_rec_u;
265 void *shader_rec_v;
266 uint32_t shader_rec_p;
267 uint32_t shader_rec_size;
268
269 /* Pointers to the uniform data. These pointers are incremented, and
270 * size decremented, as each batch of uniforms is uploaded.
271 */
272 void *uniforms_u;
273 void *uniforms_v;
274 uint32_t uniforms_p;
275 uint32_t uniforms_size;
276};
277
278static inline struct vc4_exec_info *
279vc4_first_job(struct vc4_dev *vc4)
280{
281 if (list_empty(&vc4->job_list))
282 return NULL;
283 return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
284}
285
286/**
287 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
288 * setup parameters.
289 *
290 * This will be used at draw time to relocate the reference to the texture
291 * contents in p0, and validate that the offset combined with
292 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
293 * Note that the hardware treats unprovided config parameters as 0, so not all
 294 * of them need to be set up for every texture sample, and we'll store ~0 as
295 * the offset to mark the unused ones.
296 *
297 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
298 * Setup") for definitions of the texture parameters.
299 */
300struct vc4_texture_sample_info {
301 bool is_direct;
302 uint32_t p_offset[4];
303};
304
305/**
306 * struct vc4_validated_shader_info - information about validated shaders that
307 * needs to be used from command list validation.
308 *
309 * For a given shader, each time a shader state record references it, we need
310 * to verify that the shader doesn't read more uniforms than the shader state
311 * record's uniform BO pointer can provide, and we need to apply relocations
312 * and validate the shader state record's uniforms that define the texture
313 * samples.
314 */
315struct vc4_validated_shader_info {
316 uint32_t uniforms_size;
317 uint32_t uniforms_src_size;
318 uint32_t num_texture_samples;
319 struct vc4_texture_sample_info *texture_samples;
320};
321
78/** 322/**
79 * _wait_for - magic (register) wait macro 323 * _wait_for - magic (register) wait macro
80 * 324 *
@@ -104,13 +348,29 @@ to_vc4_encoder(struct drm_encoder *encoder)
104#define wait_for(COND, MS) _wait_for(COND, MS, 1) 348#define wait_for(COND, MS) _wait_for(COND, MS, 1)
105 349
106/* vc4_bo.c */ 350/* vc4_bo.c */
351struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
107void vc4_free_object(struct drm_gem_object *gem_obj); 352void vc4_free_object(struct drm_gem_object *gem_obj);
108struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size); 353struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
354 bool from_cache);
109int vc4_dumb_create(struct drm_file *file_priv, 355int vc4_dumb_create(struct drm_file *file_priv,
110 struct drm_device *dev, 356 struct drm_device *dev,
111 struct drm_mode_create_dumb *args); 357 struct drm_mode_create_dumb *args);
112struct dma_buf *vc4_prime_export(struct drm_device *dev, 358struct dma_buf *vc4_prime_export(struct drm_device *dev,
113 struct drm_gem_object *obj, int flags); 359 struct drm_gem_object *obj, int flags);
360int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
361 struct drm_file *file_priv);
362int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
363 struct drm_file *file_priv);
364int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
365 struct drm_file *file_priv);
366int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
367 struct drm_file *file_priv);
368int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
369int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
370void *vc4_prime_vmap(struct drm_gem_object *obj);
371void vc4_bo_cache_init(struct drm_device *dev);
372void vc4_bo_cache_destroy(struct drm_device *dev);
373int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
114 374
115/* vc4_crtc.c */ 375/* vc4_crtc.c */
116extern struct platform_driver vc4_crtc_driver; 376extern struct platform_driver vc4_crtc_driver;
@@ -126,10 +386,34 @@ void vc4_debugfs_cleanup(struct drm_minor *minor);
126/* vc4_drv.c */ 386/* vc4_drv.c */
127void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); 387void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
128 388
389/* vc4_gem.c */
390void vc4_gem_init(struct drm_device *dev);
391void vc4_gem_destroy(struct drm_device *dev);
392int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
393 struct drm_file *file_priv);
394int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
395 struct drm_file *file_priv);
396int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
397 struct drm_file *file_priv);
398void vc4_submit_next_job(struct drm_device *dev);
399int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
400 uint64_t timeout_ns, bool interruptible);
401void vc4_job_handle_completed(struct vc4_dev *vc4);
402int vc4_queue_seqno_cb(struct drm_device *dev,
403 struct vc4_seqno_cb *cb, uint64_t seqno,
404 void (*func)(struct vc4_seqno_cb *cb));
405
129/* vc4_hdmi.c */ 406/* vc4_hdmi.c */
130extern struct platform_driver vc4_hdmi_driver; 407extern struct platform_driver vc4_hdmi_driver;
131int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused); 408int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
132 409
410/* vc4_irq.c */
411irqreturn_t vc4_irq(int irq, void *arg);
412void vc4_irq_preinstall(struct drm_device *dev);
413int vc4_irq_postinstall(struct drm_device *dev);
414void vc4_irq_uninstall(struct drm_device *dev);
415void vc4_irq_reset(struct drm_device *dev);
416
133/* vc4_hvs.c */ 417/* vc4_hvs.c */
134extern struct platform_driver vc4_hvs_driver; 418extern struct platform_driver vc4_hvs_driver;
135void vc4_hvs_dump_state(struct drm_device *dev); 419void vc4_hvs_dump_state(struct drm_device *dev);
@@ -143,3 +427,35 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
143 enum drm_plane_type type); 427 enum drm_plane_type type);
144u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); 428u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
145u32 vc4_plane_dlist_size(struct drm_plane_state *state); 429u32 vc4_plane_dlist_size(struct drm_plane_state *state);
430void vc4_plane_async_set_fb(struct drm_plane *plane,
431 struct drm_framebuffer *fb);
432
433/* vc4_v3d.c */
434extern struct platform_driver vc4_v3d_driver;
435int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
436int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
437int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
438
439/* vc4_validate.c */
440int
441vc4_validate_bin_cl(struct drm_device *dev,
442 void *validated,
443 void *unvalidated,
444 struct vc4_exec_info *exec);
445
446int
447vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
448
449struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
450 uint32_t hindex);
451
452int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
453
454bool vc4_check_tex_size(struct vc4_exec_info *exec,
455 struct drm_gem_cma_object *fbo,
456 uint32_t offset, uint8_t tiling_format,
457 uint32_t width, uint32_t height, uint8_t cpp);
458
459/* vc4_validate_shader.c */
460struct vc4_validated_shader_info *
461vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
new file mode 100644
index 000000000000..48ce30a6f4b5
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -0,0 +1,866 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/device.h>
27#include <linux/io.h>
28
29#include "uapi/drm/vc4_drm.h"
30#include "vc4_drv.h"
31#include "vc4_regs.h"
32#include "vc4_trace.h"
33
34static void
35vc4_queue_hangcheck(struct drm_device *dev)
36{
37 struct vc4_dev *vc4 = to_vc4_dev(dev);
38
39 mod_timer(&vc4->hangcheck.timer,
40 round_jiffies_up(jiffies + msecs_to_jiffies(100)));
41}
42
43struct vc4_hang_state {
44 struct drm_vc4_get_hang_state user_state;
45
46 u32 bo_count;
47 struct drm_gem_object **bo;
48};
49
50static void
51vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
52{
53 unsigned int i;
54
55 mutex_lock(&dev->struct_mutex);
56 for (i = 0; i < state->user_state.bo_count; i++)
57 drm_gem_object_unreference(state->bo[i]);
58 mutex_unlock(&dev->struct_mutex);
59
60 kfree(state);
61}
62
63int
64vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
65 struct drm_file *file_priv)
66{
67 struct drm_vc4_get_hang_state *get_state = data;
68 struct drm_vc4_get_hang_state_bo *bo_state;
69 struct vc4_hang_state *kernel_state;
70 struct drm_vc4_get_hang_state *state;
71 struct vc4_dev *vc4 = to_vc4_dev(dev);
72 unsigned long irqflags;
73 u32 i;
74 int ret = 0;
75
76 spin_lock_irqsave(&vc4->job_lock, irqflags);
77 kernel_state = vc4->hang_state;
78 if (!kernel_state) {
79 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
80 return -ENOENT;
81 }
82 state = &kernel_state->user_state;
83
84 /* If the user's array isn't big enough, just return the
85 * required array size.
86 */
87 if (get_state->bo_count < state->bo_count) {
88 get_state->bo_count = state->bo_count;
89 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
90 return 0;
91 }
92
93 vc4->hang_state = NULL;
94 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
95
96 /* Save the user's BO pointer, so we don't stomp it with the memcpy. */
97 state->bo = get_state->bo;
98 memcpy(get_state, state, sizeof(*state));
99
100 bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
101 if (!bo_state) {
102 ret = -ENOMEM;
103 goto err_free;
104 }
105
106 for (i = 0; i < state->bo_count; i++) {
107 struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
108 u32 handle;
109
110 ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
111 &handle);
112
113 if (ret) {
114 state->bo_count = i - 1;
115 goto err;
116 }
117 bo_state[i].handle = handle;
118 bo_state[i].paddr = vc4_bo->base.paddr;
119 bo_state[i].size = vc4_bo->base.base.size;
120 }
121
122 if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
123 bo_state,
124 state->bo_count * sizeof(*bo_state)))
125 ret = -EFAULT;
126
127 kfree(bo_state);
128
129err_free:
130
131 vc4_free_hang_state(dev, kernel_state);
132
133err:
134 return ret;
135}
136
137static void
138vc4_save_hang_state(struct drm_device *dev)
139{
140 struct vc4_dev *vc4 = to_vc4_dev(dev);
141 struct drm_vc4_get_hang_state *state;
142 struct vc4_hang_state *kernel_state;
143 struct vc4_exec_info *exec;
144 struct vc4_bo *bo;
145 unsigned long irqflags;
146 unsigned int i, unref_list_count;
147
148 kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
149 if (!kernel_state)
150 return;
151
152 state = &kernel_state->user_state;
153
154 spin_lock_irqsave(&vc4->job_lock, irqflags);
155 exec = vc4_first_job(vc4);
156 if (!exec) {
157 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
158 return;
159 }
160
161 unref_list_count = 0;
162 list_for_each_entry(bo, &exec->unref_list, unref_head)
163 unref_list_count++;
164
165 state->bo_count = exec->bo_count + unref_list_count;
166 kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
167 GFP_ATOMIC);
168 if (!kernel_state->bo) {
169 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
170 return;
171 }
172
173 for (i = 0; i < exec->bo_count; i++) {
174 drm_gem_object_reference(&exec->bo[i]->base);
175 kernel_state->bo[i] = &exec->bo[i]->base;
176 }
177
178 list_for_each_entry(bo, &exec->unref_list, unref_head) {
179 drm_gem_object_reference(&bo->base.base);
180 kernel_state->bo[i] = &bo->base.base;
181 i++;
182 }
183
184 state->start_bin = exec->ct0ca;
185 state->start_render = exec->ct1ca;
186
187 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
188
189 state->ct0ca = V3D_READ(V3D_CTNCA(0));
190 state->ct0ea = V3D_READ(V3D_CTNEA(0));
191
192 state->ct1ca = V3D_READ(V3D_CTNCA(1));
193 state->ct1ea = V3D_READ(V3D_CTNEA(1));
194
195 state->ct0cs = V3D_READ(V3D_CTNCS(0));
196 state->ct1cs = V3D_READ(V3D_CTNCS(1));
197
198 state->ct0ra0 = V3D_READ(V3D_CT00RA0);
199 state->ct1ra0 = V3D_READ(V3D_CT01RA0);
200
201 state->bpca = V3D_READ(V3D_BPCA);
202 state->bpcs = V3D_READ(V3D_BPCS);
203 state->bpoa = V3D_READ(V3D_BPOA);
204 state->bpos = V3D_READ(V3D_BPOS);
205
206 state->vpmbase = V3D_READ(V3D_VPMBASE);
207
208 state->dbge = V3D_READ(V3D_DBGE);
209 state->fdbgo = V3D_READ(V3D_FDBGO);
210 state->fdbgb = V3D_READ(V3D_FDBGB);
211 state->fdbgr = V3D_READ(V3D_FDBGR);
212 state->fdbgs = V3D_READ(V3D_FDBGS);
213 state->errstat = V3D_READ(V3D_ERRSTAT);
214
215 spin_lock_irqsave(&vc4->job_lock, irqflags);
216 if (vc4->hang_state) {
217 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
218 vc4_free_hang_state(dev, kernel_state);
219 } else {
220 vc4->hang_state = kernel_state;
221 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
222 }
223}
224
225static void
226vc4_reset(struct drm_device *dev)
227{
228 struct vc4_dev *vc4 = to_vc4_dev(dev);
229
230 DRM_INFO("Resetting GPU.\n");
231 vc4_v3d_set_power(vc4, false);
232 vc4_v3d_set_power(vc4, true);
233
234 vc4_irq_reset(dev);
235
236 /* Rearm the hangcheck -- another job might have been waiting
237 * for our hung one to get kicked off, and vc4_irq_reset()
238 * would have started it.
239 */
240 vc4_queue_hangcheck(dev);
241}
242
243static void
244vc4_reset_work(struct work_struct *work)
245{
246 struct vc4_dev *vc4 =
247 container_of(work, struct vc4_dev, hangcheck.reset_work);
248
249 vc4_save_hang_state(vc4->dev);
250
251 vc4_reset(vc4->dev);
252}
253
254static void
255vc4_hangcheck_elapsed(unsigned long data)
256{
257 struct drm_device *dev = (struct drm_device *)data;
258 struct vc4_dev *vc4 = to_vc4_dev(dev);
259 uint32_t ct0ca, ct1ca;
260
261 /* If idle, we can stop watching for hangs. */
262 if (list_empty(&vc4->job_list))
263 return;
264
265 ct0ca = V3D_READ(V3D_CTNCA(0));
266 ct1ca = V3D_READ(V3D_CTNCA(1));
267
268 /* If we've made any progress in execution, rearm the timer
269 * and wait.
270 */
271 if (ct0ca != vc4->hangcheck.last_ct0ca ||
272 ct1ca != vc4->hangcheck.last_ct1ca) {
273 vc4->hangcheck.last_ct0ca = ct0ca;
274 vc4->hangcheck.last_ct1ca = ct1ca;
275 vc4_queue_hangcheck(dev);
276 return;
277 }
278
279 /* We've gone too long with no progress, reset. This has to
280 * be done from a work struct, since resetting can sleep and
281 * this timer hook isn't allowed to.
282 */
283 schedule_work(&vc4->hangcheck.reset_work);
284}
285
286static void
287submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
288{
289 struct vc4_dev *vc4 = to_vc4_dev(dev);
290
291 /* Set the current and end address of the control list.
292 * Writing the end register is what starts the job.
293 */
294 V3D_WRITE(V3D_CTNCA(thread), start);
295 V3D_WRITE(V3D_CTNEA(thread), end);
296}
297
298int
299vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
300 bool interruptible)
301{
302 struct vc4_dev *vc4 = to_vc4_dev(dev);
303 int ret = 0;
304 unsigned long timeout_expire;
305 DEFINE_WAIT(wait);
306
307 if (vc4->finished_seqno >= seqno)
308 return 0;
309
310 if (timeout_ns == 0)
311 return -ETIME;
312
313 timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
314
315 trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
316 for (;;) {
317 prepare_to_wait(&vc4->job_wait_queue, &wait,
318 interruptible ? TASK_INTERRUPTIBLE :
319 TASK_UNINTERRUPTIBLE);
320
321 if (interruptible && signal_pending(current)) {
322 ret = -ERESTARTSYS;
323 break;
324 }
325
326 if (vc4->finished_seqno >= seqno)
327 break;
328
329 if (timeout_ns != ~0ull) {
330 if (time_after_eq(jiffies, timeout_expire)) {
331 ret = -ETIME;
332 break;
333 }
334 schedule_timeout(timeout_expire - jiffies);
335 } else {
336 schedule();
337 }
338 }
339
340 finish_wait(&vc4->job_wait_queue, &wait);
341 trace_vc4_wait_for_seqno_end(dev, seqno);
342
343 if (ret && ret != -ERESTARTSYS) {
344 DRM_ERROR("timeout waiting for render thread idle\n");
345 return ret;
346 }
347
348 return 0;
349}
350
351static void
352vc4_flush_caches(struct drm_device *dev)
353{
354 struct vc4_dev *vc4 = to_vc4_dev(dev);
355
356 /* Flush the GPU L2 caches. These caches sit on top of system
357 * L3 (the 128kb or so shared with the CPU), and are
358 * non-allocating in the L3.
359 */
360 V3D_WRITE(V3D_L2CACTL,
361 V3D_L2CACTL_L2CCLR);
362
363 V3D_WRITE(V3D_SLCACTL,
364 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
365 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
366 VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
367 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
368}
369
 370/* Sets the registers for the next job to actually be executed in
371 * the hardware.
372 *
373 * The job_lock should be held during this.
374 */
375void
376vc4_submit_next_job(struct drm_device *dev)
377{
378 struct vc4_dev *vc4 = to_vc4_dev(dev);
379 struct vc4_exec_info *exec = vc4_first_job(vc4);
380
381 if (!exec)
382 return;
383
384 vc4_flush_caches(dev);
385
386 /* Disable the binner's pre-loaded overflow memory address */
387 V3D_WRITE(V3D_BPOA, 0);
388 V3D_WRITE(V3D_BPOS, 0);
389
390 if (exec->ct0ca != exec->ct0ea)
391 submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
392 submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
393}
394
395static void
396vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
397{
398 struct vc4_bo *bo;
399 unsigned i;
400
401 for (i = 0; i < exec->bo_count; i++) {
402 bo = to_vc4_bo(&exec->bo[i]->base);
403 bo->seqno = seqno;
404 }
405
406 list_for_each_entry(bo, &exec->unref_list, unref_head) {
407 bo->seqno = seqno;
408 }
409}
410
411/* Queues a struct vc4_exec_info for execution. If no job is
412 * currently executing, then submits it.
413 *
414 * Unlike most GPUs, our hardware only handles one command list at a
415 * time. To queue multiple jobs at once, we'd need to edit the
416 * previous command list to have a jump to the new one at the end, and
417 * then bump the end address. That's a change for a later date,
418 * though.
419 */
420static void
421vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
422{
423 struct vc4_dev *vc4 = to_vc4_dev(dev);
424 uint64_t seqno;
425 unsigned long irqflags;
426
427 spin_lock_irqsave(&vc4->job_lock, irqflags);
428
429 seqno = ++vc4->emit_seqno;
430 exec->seqno = seqno;
431 vc4_update_bo_seqnos(exec, seqno);
432
433 list_add_tail(&exec->head, &vc4->job_list);
434
435 /* If no job was executing, kick ours off. Otherwise, it'll
436 * get started when the previous job's frame done interrupt
437 * occurs.
438 */
439 if (vc4_first_job(vc4) == exec) {
440 vc4_submit_next_job(dev);
441 vc4_queue_hangcheck(dev);
442 }
443
444 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
445}
446
447/**
448 * Looks up a bunch of GEM handles for BOs and stores the array for
449 * use in the command validator that actually writes relocated
450 * addresses pointing to them.
451 */
452static int
453vc4_cl_lookup_bos(struct drm_device *dev,
454 struct drm_file *file_priv,
455 struct vc4_exec_info *exec)
456{
457 struct drm_vc4_submit_cl *args = exec->args;
458 uint32_t *handles;
459 int ret = 0;
460 int i;
461
462 exec->bo_count = args->bo_handle_count;
463
464 if (!exec->bo_count) {
465 /* See comment on bo_index for why we have to check
466 * this.
467 */
468 DRM_ERROR("Rendering requires BOs to validate\n");
469 return -EINVAL;
470 }
471
472 exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
473 GFP_KERNEL);
474 if (!exec->bo) {
475 DRM_ERROR("Failed to allocate validated BO pointers\n");
476 return -ENOMEM;
477 }
478
479 handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
480 if (!handles) {
481 DRM_ERROR("Failed to allocate incoming GEM handles\n");
482 goto fail;
483 }
484
485 ret = copy_from_user(handles,
486 (void __user *)(uintptr_t)args->bo_handles,
487 exec->bo_count * sizeof(uint32_t));
488 if (ret) {
489 DRM_ERROR("Failed to copy in GEM handles\n");
490 goto fail;
491 }
492
493 spin_lock(&file_priv->table_lock);
494 for (i = 0; i < exec->bo_count; i++) {
495 struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
496 handles[i]);
497 if (!bo) {
498 DRM_ERROR("Failed to look up GEM BO %d: %d\n",
499 i, handles[i]);
500 ret = -EINVAL;
501 spin_unlock(&file_priv->table_lock);
502 goto fail;
503 }
504 drm_gem_object_reference(bo);
505 exec->bo[i] = (struct drm_gem_cma_object *)bo;
506 }
507 spin_unlock(&file_priv->table_lock);
508
509fail:
510 kfree(handles);
511 return 0;
512}
513
514static int
515vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
516{
517 struct drm_vc4_submit_cl *args = exec->args;
518 void *temp = NULL;
519 void *bin;
520 int ret = 0;
521 uint32_t bin_offset = 0;
522 uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
523 16);
524 uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
525 uint32_t exec_size = uniforms_offset + args->uniforms_size;
526 uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
527 args->shader_rec_count);
528 struct vc4_bo *bo;
529
530 if (uniforms_offset < shader_rec_offset ||
531 exec_size < uniforms_offset ||
532 args->shader_rec_count >= (UINT_MAX /
533 sizeof(struct vc4_shader_state)) ||
534 temp_size < exec_size) {
535 DRM_ERROR("overflow in exec arguments\n");
536 goto fail;
537 }
538
539 /* Allocate space where we'll store the copied in user command lists
540 * and shader records.
541 *
542 * We don't just copy directly into the BOs because we need to
543 * read the contents back for validation, and I think the
544 * bo->vaddr is uncached access.
545 */
546 temp = kmalloc(temp_size, GFP_KERNEL);
547 if (!temp) {
548 DRM_ERROR("Failed to allocate storage for copying "
549 "in bin/render CLs.\n");
550 ret = -ENOMEM;
551 goto fail;
552 }
553 bin = temp + bin_offset;
554 exec->shader_rec_u = temp + shader_rec_offset;
555 exec->uniforms_u = temp + uniforms_offset;
556 exec->shader_state = temp + exec_size;
557 exec->shader_state_size = args->shader_rec_count;
558
559 if (copy_from_user(bin,
560 (void __user *)(uintptr_t)args->bin_cl,
561 args->bin_cl_size)) {
562 ret = -EFAULT;
563 goto fail;
564 }
565
566 if (copy_from_user(exec->shader_rec_u,
567 (void __user *)(uintptr_t)args->shader_rec,
568 args->shader_rec_size)) {
569 ret = -EFAULT;
570 goto fail;
571 }
572
573 if (copy_from_user(exec->uniforms_u,
574 (void __user *)(uintptr_t)args->uniforms,
575 args->uniforms_size)) {
576 ret = -EFAULT;
577 goto fail;
578 }
579
580 bo = vc4_bo_create(dev, exec_size, true);
581 if (!bo) {
582 DRM_ERROR("Couldn't allocate BO for binning\n");
583 ret = -ENOMEM;
584 goto fail;
585 }
586 exec->exec_bo = &bo->base;
587
588 list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
589 &exec->unref_list);
590
591 exec->ct0ca = exec->exec_bo->paddr + bin_offset;
592
593 exec->bin_u = bin;
594
595 exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
596 exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
597 exec->shader_rec_size = args->shader_rec_size;
598
599 exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
600 exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
601 exec->uniforms_size = args->uniforms_size;
602
603 ret = vc4_validate_bin_cl(dev,
604 exec->exec_bo->vaddr + bin_offset,
605 bin,
606 exec);
607 if (ret)
608 goto fail;
609
610 ret = vc4_validate_shader_recs(dev, exec);
611
612fail:
613 kfree(temp);
614 return ret;
615}
616
617static void
618vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
619{
620 unsigned i;
621
622 /* Need the struct lock for drm_gem_object_unreference(). */
623 mutex_lock(&dev->struct_mutex);
624 if (exec->bo) {
625 for (i = 0; i < exec->bo_count; i++)
626 drm_gem_object_unreference(&exec->bo[i]->base);
627 kfree(exec->bo);
628 }
629
630 while (!list_empty(&exec->unref_list)) {
631 struct vc4_bo *bo = list_first_entry(&exec->unref_list,
632 struct vc4_bo, unref_head);
633 list_del(&bo->unref_head);
634 drm_gem_object_unreference(&bo->base.base);
635 }
636 mutex_unlock(&dev->struct_mutex);
637
638 kfree(exec);
639}
640
641void
642vc4_job_handle_completed(struct vc4_dev *vc4)
643{
644 unsigned long irqflags;
645 struct vc4_seqno_cb *cb, *cb_temp;
646
647 spin_lock_irqsave(&vc4->job_lock, irqflags);
648 while (!list_empty(&vc4->job_done_list)) {
649 struct vc4_exec_info *exec =
650 list_first_entry(&vc4->job_done_list,
651 struct vc4_exec_info, head);
652 list_del(&exec->head);
653
654 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
655 vc4_complete_exec(vc4->dev, exec);
656 spin_lock_irqsave(&vc4->job_lock, irqflags);
657 }
658
659 list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
660 if (cb->seqno <= vc4->finished_seqno) {
661 list_del_init(&cb->work.entry);
662 schedule_work(&cb->work);
663 }
664 }
665
666 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
667}
668
669static void vc4_seqno_cb_work(struct work_struct *work)
670{
671 struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
672
673 cb->func(cb);
674}
675
676int vc4_queue_seqno_cb(struct drm_device *dev,
677 struct vc4_seqno_cb *cb, uint64_t seqno,
678 void (*func)(struct vc4_seqno_cb *cb))
679{
680 struct vc4_dev *vc4 = to_vc4_dev(dev);
681 int ret = 0;
682 unsigned long irqflags;
683
684 cb->func = func;
685 INIT_WORK(&cb->work, vc4_seqno_cb_work);
686
687 spin_lock_irqsave(&vc4->job_lock, irqflags);
688 if (seqno > vc4->finished_seqno) {
689 cb->seqno = seqno;
690 list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
691 } else {
692 schedule_work(&cb->work);
693 }
694 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
695
696 return ret;
697}
698
699/* Scheduled when any job has been completed, this walks the list of
700 * jobs that had completed and unrefs their BOs and frees their exec
701 * structs.
702 */
703static void
704vc4_job_done_work(struct work_struct *work)
705{
706 struct vc4_dev *vc4 =
707 container_of(work, struct vc4_dev, job_done_work);
708
709 vc4_job_handle_completed(vc4);
710}
711
712static int
713vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
714 uint64_t seqno,
715 uint64_t *timeout_ns)
716{
717 unsigned long start = jiffies;
718 int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
719
720 if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
721 uint64_t delta = jiffies_to_nsecs(jiffies - start);
722
723 if (*timeout_ns >= delta)
724 *timeout_ns -= delta;
725 }
726
727 return ret;
728}
729
730int
731vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
732 struct drm_file *file_priv)
733{
734 struct drm_vc4_wait_seqno *args = data;
735
736 return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
737 &args->timeout_ns);
738}
739
740int
741vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
742 struct drm_file *file_priv)
743{
744 int ret;
745 struct drm_vc4_wait_bo *args = data;
746 struct drm_gem_object *gem_obj;
747 struct vc4_bo *bo;
748
749 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
750 if (!gem_obj) {
751 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
752 return -EINVAL;
753 }
754 bo = to_vc4_bo(gem_obj);
755
756 ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
757 &args->timeout_ns);
758
759 drm_gem_object_unreference_unlocked(gem_obj);
760 return ret;
761}
762
763/**
764 * Submits a command list to the VC4.
765 *
766 * This is what is called batchbuffer emitting on other hardware.
767 */
768int
769vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
770 struct drm_file *file_priv)
771{
772 struct vc4_dev *vc4 = to_vc4_dev(dev);
773 struct drm_vc4_submit_cl *args = data;
774 struct vc4_exec_info *exec;
775 int ret;
776
777 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
778 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
779 return -EINVAL;
780 }
781
782 exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
783 if (!exec) {
784 DRM_ERROR("malloc failure on exec struct\n");
785 return -ENOMEM;
786 }
787
788 exec->args = args;
789 INIT_LIST_HEAD(&exec->unref_list);
790
791 ret = vc4_cl_lookup_bos(dev, file_priv, exec);
792 if (ret)
793 goto fail;
794
795 if (exec->args->bin_cl_size != 0) {
796 ret = vc4_get_bcl(dev, exec);
797 if (ret)
798 goto fail;
799 } else {
800 exec->ct0ca = 0;
801 exec->ct0ea = 0;
802 }
803
804 ret = vc4_get_rcl(dev, exec);
805 if (ret)
806 goto fail;
807
808 /* Clear this out of the struct we'll be putting in the queue,
809 * since it's part of our stack.
810 */
811 exec->args = NULL;
812
813 vc4_queue_submit(dev, exec);
814
815 /* Return the seqno for our job. */
816 args->seqno = vc4->emit_seqno;
817
818 return 0;
819
820fail:
821 vc4_complete_exec(vc4->dev, exec);
822
823 return ret;
824}
825
826void
827vc4_gem_init(struct drm_device *dev)
828{
829 struct vc4_dev *vc4 = to_vc4_dev(dev);
830
831 INIT_LIST_HEAD(&vc4->job_list);
832 INIT_LIST_HEAD(&vc4->job_done_list);
833 INIT_LIST_HEAD(&vc4->seqno_cb_list);
834 spin_lock_init(&vc4->job_lock);
835
836 INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
837 setup_timer(&vc4->hangcheck.timer,
838 vc4_hangcheck_elapsed,
839 (unsigned long)dev);
840
841 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
842}
843
844void
845vc4_gem_destroy(struct drm_device *dev)
846{
847 struct vc4_dev *vc4 = to_vc4_dev(dev);
848
849 /* Waiting for exec to finish would need to be done before
850 * unregistering V3D.
851 */
852 WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
853
854 /* V3D should already have disabled its interrupt and cleared
855 * the overflow allocation registers. Now free the object.
856 */
857 if (vc4->overflow_mem) {
858 drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
859 vc4->overflow_mem = NULL;
860 }
861
862 vc4_bo_cache_destroy(dev);
863
864 if (vc4->hang_state)
865 vc4_free_hang_state(dev, vc4->hang_state);
866}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index da9a36d6e1d1..c69c0460196b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -519,7 +519,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
519 WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0); 519 WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0);
520 520
521 drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs, 521 drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
522 DRM_MODE_ENCODER_TMDS); 522 DRM_MODE_ENCODER_TMDS, NULL);
523 drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs); 523 drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
524 524
525 hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder); 525 hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder);
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
new file mode 100644
index 000000000000..b68060e758db
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -0,0 +1,210 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/** DOC: Interrupt management for the V3D engine.
25 *
26 * We have an interrupt status register (V3D_INTCTL) which reports
27 * interrupts, and where writing 1 bits clears those interrupts.
28 * There are also a pair of interrupt registers
29 * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
30 * disables that specific interrupt, and 0s written are ignored
31 * (reading either one returns the set of enabled interrupts).
32 *
33 * When we take a render frame interrupt, we need to wake the
34 * processes waiting for some frame to be done, and get the next frame
35 * submitted ASAP (so the hardware doesn't sit idle when there's work
36 * to do).
37 *
38 * When we take the binner out of memory interrupt, we need to
39 * allocate some new memory and pass it to the binner so that the
40 * current job can make progress.
41 */
42
43#include "vc4_drv.h"
44#include "vc4_regs.h"
45
/* The set of V3D interrupts this driver services: binner
 * out-of-memory and render-frame-done.
 */
46#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
47 V3D_INT_FRDONE)
48
/* NOTE(review): render_wait has external linkage but is not
 * referenced anywhere in this file -- consider making it static or
 * removing it; confirm no other compilation unit uses it first.
 */
49DECLARE_WAIT_QUEUE_HEAD(render_wait);
50
51static void
52vc4_overflow_mem_work(struct work_struct *work)
53{
54 struct vc4_dev *vc4 =
55 container_of(work, struct vc4_dev, overflow_mem_work);
56 struct drm_device *dev = vc4->dev;
57 struct vc4_bo *bo;
58
59 bo = vc4_bo_create(dev, 256 * 1024, true);
60 if (!bo) {
61 DRM_ERROR("Couldn't allocate binner overflow mem\n");
62 return;
63 }
64
65 /* If there's a job executing currently, then our previous
66 * overflow allocation is getting used in that job and we need
67 * to queue it to be released when the job is done. But if no
68 * job is executing at all, then we can free the old overflow
69 * object direcctly.
70 *
71 * No lock necessary for this pointer since we're the only
72 * ones that update the pointer, and our workqueue won't
73 * reenter.
74 */
75 if (vc4->overflow_mem) {
76 struct vc4_exec_info *current_exec;
77 unsigned long irqflags;
78
79 spin_lock_irqsave(&vc4->job_lock, irqflags);
80 current_exec = vc4_first_job(vc4);
81 if (current_exec) {
82 vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
83 list_add_tail(&vc4->overflow_mem->unref_head,
84 &current_exec->unref_list);
85 vc4->overflow_mem = NULL;
86 }
87 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
88 }
89
90 if (vc4->overflow_mem)
91 drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
92 vc4->overflow_mem = bo;
93
94 V3D_WRITE(V3D_BPOA, bo->base.paddr);
95 V3D_WRITE(V3D_BPOS, bo->base.base.size);
96 V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
97 V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
98}
99
100static void
101vc4_irq_finish_job(struct drm_device *dev)
102{
103 struct vc4_dev *vc4 = to_vc4_dev(dev);
104 struct vc4_exec_info *exec = vc4_first_job(vc4);
105
106 if (!exec)
107 return;
108
109 vc4->finished_seqno++;
110 list_move_tail(&exec->head, &vc4->job_done_list);
111 vc4_submit_next_job(dev);
112
113 wake_up_all(&vc4->job_wait_queue);
114 schedule_work(&vc4->job_done_work);
115}
116
117irqreturn_t
118vc4_irq(int irq, void *arg)
119{
120 struct drm_device *dev = arg;
121 struct vc4_dev *vc4 = to_vc4_dev(dev);
122 uint32_t intctl;
123 irqreturn_t status = IRQ_NONE;
124
125 barrier();
126 intctl = V3D_READ(V3D_INTCTL);
127
128 /* Acknowledge the interrupts we're handling here. The render
129 * frame done interrupt will be cleared, while OUTOMEM will
130 * stay high until the underlying cause is cleared.
131 */
132 V3D_WRITE(V3D_INTCTL, intctl);
133
134 if (intctl & V3D_INT_OUTOMEM) {
135 /* Disable OUTOMEM until the work is done. */
136 V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
137 schedule_work(&vc4->overflow_mem_work);
138 status = IRQ_HANDLED;
139 }
140
141 if (intctl & V3D_INT_FRDONE) {
142 spin_lock(&vc4->job_lock);
143 vc4_irq_finish_job(dev);
144 spin_unlock(&vc4->job_lock);
145 status = IRQ_HANDLED;
146 }
147
148 return status;
149}
150
151void
152vc4_irq_preinstall(struct drm_device *dev)
153{
154 struct vc4_dev *vc4 = to_vc4_dev(dev);
155
156 init_waitqueue_head(&vc4->job_wait_queue);
157 INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
158
159 /* Clear any pending interrupts someone might have left around
160 * for us.
161 */
162 V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
163}
164
165int
166vc4_irq_postinstall(struct drm_device *dev)
167{
168 struct vc4_dev *vc4 = to_vc4_dev(dev);
169
170 /* Enable both the render done and out of memory interrupts. */
171 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
172
173 return 0;
174}
175
176void
177vc4_irq_uninstall(struct drm_device *dev)
178{
179 struct vc4_dev *vc4 = to_vc4_dev(dev);
180
181 /* Disable sending interrupts for our driver's IRQs. */
182 V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
183
184 /* Clear any pending interrupts we might have left. */
185 V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
186
187 cancel_work_sync(&vc4->overflow_mem_work);
188}
189
190/** Reinitializes interrupt registers when a GPU reset is performed. */
191void vc4_irq_reset(struct drm_device *dev)
192{
193 struct vc4_dev *vc4 = to_vc4_dev(dev);
194 unsigned long irqflags;
195
196 /* Acknowledge any stale IRQs. */
197 V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
198
199 /*
200 * Turn all our interrupts on. Binner out of memory is the
201 * only one we expect to trigger at this point, since we've
202 * just come from poweron and haven't supplied any overflow
203 * memory yet.
204 */
205 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
206
207 spin_lock_irqsave(&vc4->job_lock, irqflags);
208 vc4_irq_finish_job(dev);
209 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
210}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 2e5597d10cc6..f95f2df5f8d1 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "drm_crtc.h" 17#include "drm_crtc.h"
18#include "drm_atomic.h"
18#include "drm_atomic_helper.h" 19#include "drm_atomic_helper.h"
19#include "drm_crtc_helper.h" 20#include "drm_crtc_helper.h"
20#include "drm_plane_helper.h" 21#include "drm_plane_helper.h"
@@ -29,10 +30,152 @@ static void vc4_output_poll_changed(struct drm_device *dev)
29 drm_fbdev_cma_hotplug_event(vc4->fbdev); 30 drm_fbdev_cma_hotplug_event(vc4->fbdev);
30} 31}
31 32
/* Bookkeeping for one in-flight atomic commit. */
33struct vc4_commit {
34 struct drm_device *dev; /* device the commit targets (set at init) */
35 struct drm_atomic_state *state; /* state being committed; freed on completion */
36 struct vc4_seqno_cb cb; /* fires once the GPU work we wait on retires */
37};
38
39static void
40vc4_atomic_complete_commit(struct vc4_commit *c)
41{
42 struct drm_atomic_state *state = c->state;
43 struct drm_device *dev = state->dev;
44 struct vc4_dev *vc4 = to_vc4_dev(dev);
45
46 drm_atomic_helper_commit_modeset_disables(dev, state);
47
48 drm_atomic_helper_commit_planes(dev, state, false);
49
50 drm_atomic_helper_commit_modeset_enables(dev, state);
51
52 drm_atomic_helper_wait_for_vblanks(dev, state);
53
54 drm_atomic_helper_cleanup_planes(dev, state);
55
56 drm_atomic_state_free(state);
57
58 up(&vc4->async_modeset);
59
60 kfree(c);
61}
62
63static void
64vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
65{
66 struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
67
68 vc4_atomic_complete_commit(c);
69}
70
71static struct vc4_commit *commit_init(struct drm_atomic_state *state)
72{
73 struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
74
75 if (!c)
76 return NULL;
77 c->dev = state->dev;
78 c->state = state;
79
80 return c;
81}
82
83/**
84 * vc4_atomic_commit - commit validated state object
85 * @dev: DRM device
86 * @state: the driver state object
87 * @async: asynchronous commit
88 *
89 * This function commits a with drm_atomic_helper_check() pre-validated state
90 * object. This can still fail when e.g. the framebuffer reservation fails. For
91 * now this doesn't implement asynchronous commits.
92 *
93 * RETURNS
94 * Zero for success or -errno.
95 */
96static int vc4_atomic_commit(struct drm_device *dev,
97 struct drm_atomic_state *state,
98 bool async)
99{
100 struct vc4_dev *vc4 = to_vc4_dev(dev);
101 int ret;
102 int i;
103 uint64_t wait_seqno = 0;
104 struct vc4_commit *c;
105
106 c = commit_init(state);
107 if (!c)
108 return -ENOMEM;
109
110 /* Make sure that any outstanding modesets have finished. */
111 ret = down_interruptible(&vc4->async_modeset);
112 if (ret) {
113 kfree(c);
114 return ret;
115 }
116
117 ret = drm_atomic_helper_prepare_planes(dev, state);
118 if (ret) {
119 kfree(c);
120 up(&vc4->async_modeset);
121 return ret;
122 }
123
124 for (i = 0; i < dev->mode_config.num_total_plane; i++) {
125 struct drm_plane *plane = state->planes[i];
126 struct drm_plane_state *new_state = state->plane_states[i];
127
128 if (!plane)
129 continue;
130
131 if ((plane->state->fb != new_state->fb) && new_state->fb) {
132 struct drm_gem_cma_object *cma_bo =
133 drm_fb_cma_get_gem_obj(new_state->fb, 0);
134 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
135
136 wait_seqno = max(bo->seqno, wait_seqno);
137 }
138 }
139
140 /*
141 * This is the point of no return - everything below never fails except
142 * when the hw goes bonghits. Which means we can commit the new state on
143 * the software side now.
144 */
145
146 drm_atomic_helper_swap_state(dev, state);
147
148 /*
149 * Everything below can be run asynchronously without the need to grab
150 * any modeset locks at all under one condition: It must be guaranteed
151 * that the asynchronous work has either been cancelled (if the driver
152 * supports it, which at least requires that the framebuffers get
153 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
154 * before the new state gets committed on the software side with
155 * drm_atomic_helper_swap_state().
156 *
157 * This scheme allows new atomic state updates to be prepared and
158 * checked in parallel to the asynchronous completion of the previous
159 * update. Which is important since compositors need to figure out the
160 * composition of the next frame right after having submitted the
161 * current layout.
162 */
163
164 if (async) {
165 vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
166 vc4_atomic_complete_commit_seqno_cb);
167 } else {
168 vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
169 vc4_atomic_complete_commit(c);
170 }
171
172 return 0;
173}
174
32static const struct drm_mode_config_funcs vc4_mode_funcs = { 175static const struct drm_mode_config_funcs vc4_mode_funcs = {
33 .output_poll_changed = vc4_output_poll_changed, 176 .output_poll_changed = vc4_output_poll_changed,
34 .atomic_check = drm_atomic_helper_check, 177 .atomic_check = drm_atomic_helper_check,
35 .atomic_commit = drm_atomic_helper_commit, 178 .atomic_commit = vc4_atomic_commit,
36 .fb_create = drm_fb_cma_create, 179 .fb_create = drm_fb_cma_create,
37}; 180};
38 181
@@ -41,6 +184,8 @@ int vc4_kms_load(struct drm_device *dev)
41 struct vc4_dev *vc4 = to_vc4_dev(dev); 184 struct vc4_dev *vc4 = to_vc4_dev(dev);
42 int ret; 185 int ret;
43 186
187 sema_init(&vc4->async_modeset, 1);
188
44 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 189 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
45 if (ret < 0) { 190 if (ret < 0) {
46 dev_err(dev->dev, "failed to initialize vblank\n"); 191 dev_err(dev->dev, "failed to initialize vblank\n");
@@ -51,6 +196,8 @@ int vc4_kms_load(struct drm_device *dev)
51 dev->mode_config.max_height = 2048; 196 dev->mode_config.max_height = 2048;
52 dev->mode_config.funcs = &vc4_mode_funcs; 197 dev->mode_config.funcs = &vc4_mode_funcs;
53 dev->mode_config.preferred_depth = 24; 198 dev->mode_config.preferred_depth = 24;
199 dev->mode_config.async_page_flip = true;
200
54 dev->vblank_disable_allowed = true; 201 dev->vblank_disable_allowed = true;
55 202
56 drm_mode_config_reset(dev); 203 drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_packet.h b/drivers/gpu/drm/vc4/vc4_packet.h
new file mode 100644
index 000000000000..0f31cc06500f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_packet.h
@@ -0,0 +1,399 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef VC4_PACKET_H
25#define VC4_PACKET_H
26
27#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
28
29enum vc4_packet {
30 VC4_PACKET_HALT = 0,
31 VC4_PACKET_NOP = 1,
32
33 VC4_PACKET_FLUSH = 4,
34 VC4_PACKET_FLUSH_ALL = 5,
35 VC4_PACKET_START_TILE_BINNING = 6,
36 VC4_PACKET_INCREMENT_SEMAPHORE = 7,
37 VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
38
39 VC4_PACKET_BRANCH = 16,
40 VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
41
42 VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
43 VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
44 VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
45 VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
46 VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
47 VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
48
49 VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
50 VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
51
52 VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
53 VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
54
55 VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
56
57 VC4_PACKET_GL_SHADER_STATE = 64,
58 VC4_PACKET_NV_SHADER_STATE = 65,
59 VC4_PACKET_VG_SHADER_STATE = 66,
60
61 VC4_PACKET_CONFIGURATION_BITS = 96,
62 VC4_PACKET_FLAT_SHADE_FLAGS = 97,
63 VC4_PACKET_POINT_SIZE = 98,
64 VC4_PACKET_LINE_WIDTH = 99,
65 VC4_PACKET_RHT_X_BOUNDARY = 100,
66 VC4_PACKET_DEPTH_OFFSET = 101,
67 VC4_PACKET_CLIP_WINDOW = 102,
68 VC4_PACKET_VIEWPORT_OFFSET = 103,
69 VC4_PACKET_Z_CLIPPING = 104,
70 VC4_PACKET_CLIPPER_XY_SCALING = 105,
71 VC4_PACKET_CLIPPER_Z_SCALING = 106,
72
73 VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
74 VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
75 VC4_PACKET_CLEAR_COLORS = 114,
76 VC4_PACKET_TILE_COORDINATES = 115,
77
78 /* Not an actual hardware packet -- this is what we use to put
79 * references to GEM bos in the command stream, since we need the u32
80 * int the actual address packet in order to store the offset from the
81 * start of the BO.
82 */
83 VC4_PACKET_GEM_HANDLES = 254,
84} __attribute__ ((__packed__));
85
86#define VC4_PACKET_HALT_SIZE 1
87#define VC4_PACKET_NOP_SIZE 1
88#define VC4_PACKET_FLUSH_SIZE 1
89#define VC4_PACKET_FLUSH_ALL_SIZE 1
90#define VC4_PACKET_START_TILE_BINNING_SIZE 1
91#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
92#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
93#define VC4_PACKET_BRANCH_SIZE 5
94#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
95#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
96#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
97#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
98#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
99#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
100#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
101#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
102#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
103#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
104#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
105#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
106#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
107#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
108#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
109#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
110#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
111#define VC4_PACKET_POINT_SIZE_SIZE 5
112#define VC4_PACKET_LINE_WIDTH_SIZE 5
113#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
114#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
115#define VC4_PACKET_CLIP_WINDOW_SIZE 9
116#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
117#define VC4_PACKET_Z_CLIPPING_SIZE 9
118#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
119#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
120#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
121#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
122#define VC4_PACKET_CLEAR_COLORS_SIZE 14
123#define VC4_PACKET_TILE_COORDINATES_SIZE 3
124#define VC4_PACKET_GEM_HANDLES_SIZE 9
125
126/* Number of multisamples supported. */
127#define VC4_MAX_SAMPLES 4
128/* Size of a full resolution color or Z tile buffer load/store. */
129#define VC4_TILE_BUFFER_SIZE (64 * 64 * 4)
130
131/** @{
132 * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
133 * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
134*/
135#define VC4_TILING_FORMAT_LINEAR 0
136#define VC4_TILING_FORMAT_T 1
137#define VC4_TILING_FORMAT_LT 2
138/** @} */
139
140/** @{
141 *
142 * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
143 * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
144 */
145#define VC4_LOADSTORE_FULL_RES_EOF BIT(3)
146#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL BIT(2)
147#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS BIT(1)
148#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR BIT(0)
149
150/** @{
151 *
152 * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
153 * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
154 */
155#define VC4_LOADSTORE_FULL_RES_EOF BIT(3)
156#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL BIT(2)
157#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS BIT(1)
158#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR BIT(0)
159
160/** @{
161 *
162 * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
163 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
164 */
165
166#define VC4_LOADSTORE_TILE_BUFFER_EOF BIT(3)
167#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK BIT(2)
168#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS BIT(1)
169#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR BIT(0)
170
171/** @} */
172
173/** @{
174 *
175 * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
176 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
177 */
178#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR BIT(15)
179#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR BIT(14)
180#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR BIT(13)
181#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP BIT(12)
182
183#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
184#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
185#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
186#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
187#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
188/** @} */
189
190/** @{
191 *
192 * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
193 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
194 */
195#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
196#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
197#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
198#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
199#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
200
201/** The values of the field are VC4_TILING_FORMAT_* */
202#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
203#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
204
205#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
206#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
207#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
208#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
209#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
210#define VC4_LOADSTORE_TILE_BUFFER_Z 3
211#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
212#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
213/** @} */
214
215#define VC4_INDEX_BUFFER_U8 (0 << 4)
216#define VC4_INDEX_BUFFER_U16 (1 << 4)
217
218/* This flag is only present in NV shader state. */
219#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS BIT(3)
220#define VC4_SHADER_FLAG_ENABLE_CLIPPING BIT(2)
221#define VC4_SHADER_FLAG_VS_POINT_SIZE BIT(1)
222#define VC4_SHADER_FLAG_FS_SINGLE_THREAD BIT(0)
223
224/** @{ byte 2 of config bits. */
225#define VC4_CONFIG_BITS_EARLY_Z_UPDATE BIT(1)
226#define VC4_CONFIG_BITS_EARLY_Z BIT(0)
227/** @} */
228
229/** @{ byte 1 of config bits. */
230#define VC4_CONFIG_BITS_Z_UPDATE BIT(7)
231/** same values in this 3-bit field as PIPE_FUNC_* */
232#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
233#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE BIT(3)
234
235#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
236#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
237#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
238#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
239
240#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT BIT(0)
241/** @} */
242
243/** @{ byte 0 of config bits. */
244#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
245#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
246#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
247
248#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES BIT(4)
249#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET BIT(3)
250#define VC4_CONFIG_BITS_CW_PRIMITIVES BIT(2)
251#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK BIT(1)
252#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT BIT(0)
253/** @} */
254
255/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
256#define VC4_BIN_CONFIG_DB_NON_MS BIT(7)
257
258#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
259#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
260#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
261#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
262#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
263#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
264
265#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
266#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
267#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
268#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
269#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
270#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
271
272#define VC4_BIN_CONFIG_AUTO_INIT_TSDA BIT(2)
273#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT BIT(1)
274#define VC4_BIN_CONFIG_MS_MODE_4X BIT(0)
275/** @} */
276
277/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
278#define VC4_RENDER_CONFIG_DB_NON_MS BIT(12)
279#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE BIT(11)
280#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G BIT(10)
281#define VC4_RENDER_CONFIG_COVERAGE_MODE BIT(9)
282#define VC4_RENDER_CONFIG_ENABLE_VG_MASK BIT(8)
283
284/** The values of the field are VC4_TILING_FORMAT_* */
285#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
286#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
287
288#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
289#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
290#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
291
292#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
293#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
294#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
295#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
296#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
297
298#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT BIT(1)
299#define VC4_RENDER_CONFIG_MS_MODE_4X BIT(0)
300
301#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
302#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
303#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
304#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
305#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
306#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
307
308enum vc4_texture_data_type {
309 VC4_TEXTURE_TYPE_RGBA8888 = 0,
310 VC4_TEXTURE_TYPE_RGBX8888 = 1,
311 VC4_TEXTURE_TYPE_RGBA4444 = 2,
312 VC4_TEXTURE_TYPE_RGBA5551 = 3,
313 VC4_TEXTURE_TYPE_RGB565 = 4,
314 VC4_TEXTURE_TYPE_LUMINANCE = 5,
315 VC4_TEXTURE_TYPE_ALPHA = 6,
316 VC4_TEXTURE_TYPE_LUMALPHA = 7,
317 VC4_TEXTURE_TYPE_ETC1 = 8,
318 VC4_TEXTURE_TYPE_S16F = 9,
319 VC4_TEXTURE_TYPE_S8 = 10,
320 VC4_TEXTURE_TYPE_S16 = 11,
321 VC4_TEXTURE_TYPE_BW1 = 12,
322 VC4_TEXTURE_TYPE_A4 = 13,
323 VC4_TEXTURE_TYPE_A1 = 14,
324 VC4_TEXTURE_TYPE_RGBA64 = 15,
325 VC4_TEXTURE_TYPE_RGBA32R = 16,
326 VC4_TEXTURE_TYPE_YUV422R = 17,
327};
328
329#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
330#define VC4_TEX_P0_OFFSET_SHIFT 12
331#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
332#define VC4_TEX_P0_CSWIZ_SHIFT 10
333#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
334#define VC4_TEX_P0_CMMODE_SHIFT 9
335#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
336#define VC4_TEX_P0_FLIPY_SHIFT 8
337#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
338#define VC4_TEX_P0_TYPE_SHIFT 4
339#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
340#define VC4_TEX_P0_MIPLVLS_SHIFT 0
341
342#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
343#define VC4_TEX_P1_TYPE4_SHIFT 31
344#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
345#define VC4_TEX_P1_HEIGHT_SHIFT 20
346#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
347#define VC4_TEX_P1_ETCFLIP_SHIFT 19
348#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
349#define VC4_TEX_P1_WIDTH_SHIFT 8
350
351#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
352#define VC4_TEX_P1_MAGFILT_SHIFT 7
353# define VC4_TEX_P1_MAGFILT_LINEAR 0
354# define VC4_TEX_P1_MAGFILT_NEAREST 1
355
356#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
357#define VC4_TEX_P1_MINFILT_SHIFT 4
358# define VC4_TEX_P1_MINFILT_LINEAR 0
359# define VC4_TEX_P1_MINFILT_NEAREST 1
360# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
361# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
362# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
363# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
364
365#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
366#define VC4_TEX_P1_WRAP_T_SHIFT 2
367#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
368#define VC4_TEX_P1_WRAP_S_SHIFT 0
369# define VC4_TEX_P1_WRAP_REPEAT 0
370# define VC4_TEX_P1_WRAP_CLAMP 1
371# define VC4_TEX_P1_WRAP_MIRROR 2
372# define VC4_TEX_P1_WRAP_BORDER 3
373
374#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
375#define VC4_TEX_P2_PTYPE_SHIFT 30
376# define VC4_TEX_P2_PTYPE_IGNORED 0
377# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
378# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
379# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
380
381/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
382#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
383#define VC4_TEX_P2_CMST_SHIFT 12
384#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
385#define VC4_TEX_P2_BSLOD_SHIFT 0
386
387/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
388#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
389#define VC4_TEX_P2_CHEIGHT_SHIFT 12
390#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
391#define VC4_TEX_P2_CWIDTH_SHIFT 0
392
393/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
394#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
395#define VC4_TEX_P2_CYOFF_SHIFT 12
396#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
397#define VC4_TEX_P2_CXOFF_SHIFT 0
398
399#endif /* VC4_PACKET_H */
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 887f3caad0be..0addbad15832 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -29,6 +29,14 @@ struct vc4_plane_state {
29 u32 *dlist; 29 u32 *dlist;
30 u32 dlist_size; /* Number of dwords in allocated for the display list */ 30 u32 dlist_size; /* Number of dwords in allocated for the display list */
31 u32 dlist_count; /* Number of used dwords in the display list. */ 31 u32 dlist_count; /* Number of used dwords in the display list. */
32
33 /* Offset in the dlist to pointer word 0. */
34 u32 pw0_offset;
35
36 /* Offset where the plane's dlist was last stored in the
37 hardware at vc4_crtc_atomic_flush() time.
38 */
39 u32 *hw_dlist;
32}; 40};
33 41
34static inline struct vc4_plane_state * 42static inline struct vc4_plane_state *
@@ -207,6 +215,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
207 /* Position Word 3: Context. Written by the HVS. */ 215 /* Position Word 3: Context. Written by the HVS. */
208 vc4_dlist_write(vc4_state, 0xc0c0c0c0); 216 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
209 217
218 vc4_state->pw0_offset = vc4_state->dlist_count;
219
210 /* Pointer Word 0: RGB / Y Pointer */ 220 /* Pointer Word 0: RGB / Y Pointer */
211 vc4_dlist_write(vc4_state, bo->paddr + offset); 221 vc4_dlist_write(vc4_state, bo->paddr + offset);
212 222
@@ -258,6 +268,8 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
258 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state); 268 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
259 int i; 269 int i;
260 270
271 vc4_state->hw_dlist = dlist;
272
261 /* Can't memcpy_toio() because it needs to be 32-bit writes. */ 273 /* Can't memcpy_toio() because it needs to be 32-bit writes. */
262 for (i = 0; i < vc4_state->dlist_count; i++) 274 for (i = 0; i < vc4_state->dlist_count; i++)
263 writel(vc4_state->dlist[i], &dlist[i]); 275 writel(vc4_state->dlist[i], &dlist[i]);
@@ -272,6 +284,34 @@ u32 vc4_plane_dlist_size(struct drm_plane_state *state)
272 return vc4_state->dlist_count; 284 return vc4_state->dlist_count;
273} 285}
274 286
287/* Updates the plane to immediately (well, once the FIFO needs
288 * refilling) scan out from at a new framebuffer.
289 */
290void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
291{
292 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
293 struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
294 uint32_t addr;
295
296 /* We're skipping the address adjustment for negative origin,
297 * because this is only called on the primary plane.
298 */
299 WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
300 addr = bo->paddr + fb->offsets[0];
301
302 /* Write the new address into the hardware immediately. The
303 * scanout will start from this address as soon as the FIFO
304 * needs to refill with pixels.
305 */
306 writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
307
308 /* Also update the CPU-side dlist copy, so that any later
309 * atomic updates that don't do a new modeset on our plane
310 * also use our updated address.
311 */
312 vc4_state->dlist[vc4_state->pw0_offset] = addr;
313}
314
275static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { 315static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
276 .prepare_fb = NULL, 316 .prepare_fb = NULL,
277 .cleanup_fb = NULL, 317 .cleanup_fb = NULL,
@@ -317,7 +357,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
317 ret = drm_universal_plane_init(dev, plane, 0xff, 357 ret = drm_universal_plane_init(dev, plane, 0xff,
318 &vc4_plane_funcs, 358 &vc4_plane_funcs,
319 formats, ARRAY_SIZE(formats), 359 formats, ARRAY_SIZE(formats),
320 type); 360 type, NULL);
321 361
322 drm_plane_helper_add(plane, &vc4_plane_helper_funcs); 362 drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
323 363
diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
new file mode 100644
index 000000000000..d5c2f3c85ebb
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
@@ -0,0 +1,264 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef VC4_QPU_DEFINES_H
25#define VC4_QPU_DEFINES_H
26
27enum qpu_op_add {
28 QPU_A_NOP,
29 QPU_A_FADD,
30 QPU_A_FSUB,
31 QPU_A_FMIN,
32 QPU_A_FMAX,
33 QPU_A_FMINABS,
34 QPU_A_FMAXABS,
35 QPU_A_FTOI,
36 QPU_A_ITOF,
37 QPU_A_ADD = 12,
38 QPU_A_SUB,
39 QPU_A_SHR,
40 QPU_A_ASR,
41 QPU_A_ROR,
42 QPU_A_SHL,
43 QPU_A_MIN,
44 QPU_A_MAX,
45 QPU_A_AND,
46 QPU_A_OR,
47 QPU_A_XOR,
48 QPU_A_NOT,
49 QPU_A_CLZ,
50 QPU_A_V8ADDS = 30,
51 QPU_A_V8SUBS = 31,
52};
53
54enum qpu_op_mul {
55 QPU_M_NOP,
56 QPU_M_FMUL,
57 QPU_M_MUL24,
58 QPU_M_V8MULD,
59 QPU_M_V8MIN,
60 QPU_M_V8MAX,
61 QPU_M_V8ADDS,
62 QPU_M_V8SUBS,
63};
64
65enum qpu_raddr {
66 QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
67 /* 0-31 are the plain regfile a or b fields */
68 QPU_R_UNIF = 32,
69 QPU_R_VARY = 35,
70 QPU_R_ELEM_QPU = 38,
71 QPU_R_NOP,
72 QPU_R_XY_PIXEL_COORD = 41,
73 QPU_R_MS_REV_FLAGS = 41,
74 QPU_R_VPM = 48,
75 QPU_R_VPM_LD_BUSY,
76 QPU_R_VPM_LD_WAIT,
77 QPU_R_MUTEX_ACQUIRE,
78};
79
80enum qpu_waddr {
81 /* 0-31 are the plain regfile a or b fields */
82 QPU_W_ACC0 = 32, /* aka r0 */
83 QPU_W_ACC1,
84 QPU_W_ACC2,
85 QPU_W_ACC3,
86 QPU_W_TMU_NOSWAP,
87 QPU_W_ACC5,
88 QPU_W_HOST_INT,
89 QPU_W_NOP,
90 QPU_W_UNIFORMS_ADDRESS,
91 QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
92 QPU_W_MS_FLAGS = 42,
93 QPU_W_REV_FLAG = 42,
94 QPU_W_TLB_STENCIL_SETUP = 43,
95 QPU_W_TLB_Z,
96 QPU_W_TLB_COLOR_MS,
97 QPU_W_TLB_COLOR_ALL,
98 QPU_W_TLB_ALPHA_MASK,
99 QPU_W_VPM,
100 QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
101 QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
102 QPU_W_MUTEX_RELEASE,
103 QPU_W_SFU_RECIP,
104 QPU_W_SFU_RECIPSQRT,
105 QPU_W_SFU_EXP,
106 QPU_W_SFU_LOG,
107 QPU_W_TMU0_S,
108 QPU_W_TMU0_T,
109 QPU_W_TMU0_R,
110 QPU_W_TMU0_B,
111 QPU_W_TMU1_S,
112 QPU_W_TMU1_T,
113 QPU_W_TMU1_R,
114 QPU_W_TMU1_B,
115};
116
117enum qpu_sig_bits {
118 QPU_SIG_SW_BREAKPOINT,
119 QPU_SIG_NONE,
120 QPU_SIG_THREAD_SWITCH,
121 QPU_SIG_PROG_END,
122 QPU_SIG_WAIT_FOR_SCOREBOARD,
123 QPU_SIG_SCOREBOARD_UNLOCK,
124 QPU_SIG_LAST_THREAD_SWITCH,
125 QPU_SIG_COVERAGE_LOAD,
126 QPU_SIG_COLOR_LOAD,
127 QPU_SIG_COLOR_LOAD_END,
128 QPU_SIG_LOAD_TMU0,
129 QPU_SIG_LOAD_TMU1,
130 QPU_SIG_ALPHA_MASK_LOAD,
131 QPU_SIG_SMALL_IMM,
132 QPU_SIG_LOAD_IMM,
133 QPU_SIG_BRANCH
134};
135
136enum qpu_mux {
137 /* hardware mux values */
138 QPU_MUX_R0,
139 QPU_MUX_R1,
140 QPU_MUX_R2,
141 QPU_MUX_R3,
142 QPU_MUX_R4,
143 QPU_MUX_R5,
144 QPU_MUX_A,
145 QPU_MUX_B,
146
147 /* non-hardware mux values */
148 QPU_MUX_IMM,
149};
150
151enum qpu_cond {
152 QPU_COND_NEVER,
153 QPU_COND_ALWAYS,
154 QPU_COND_ZS,
155 QPU_COND_ZC,
156 QPU_COND_NS,
157 QPU_COND_NC,
158 QPU_COND_CS,
159 QPU_COND_CC,
160};
161
162enum qpu_pack_mul {
163 QPU_PACK_MUL_NOP,
164 /* replicated to each 8 bits of the 32-bit dst. */
165 QPU_PACK_MUL_8888 = 3,
166 QPU_PACK_MUL_8A,
167 QPU_PACK_MUL_8B,
168 QPU_PACK_MUL_8C,
169 QPU_PACK_MUL_8D,
170};
171
172enum qpu_pack_a {
173 QPU_PACK_A_NOP,
174 /* convert to 16 bit float if float input, or to int16. */
175 QPU_PACK_A_16A,
176 QPU_PACK_A_16B,
177 /* replicated to each 8 bits of the 32-bit dst. */
178 QPU_PACK_A_8888,
179 /* Convert to 8-bit unsigned int. */
180 QPU_PACK_A_8A,
181 QPU_PACK_A_8B,
182 QPU_PACK_A_8C,
183 QPU_PACK_A_8D,
184
185 /* Saturating variants of the previous instructions. */
186 QPU_PACK_A_32_SAT, /* int-only */
187 QPU_PACK_A_16A_SAT, /* int or float */
188 QPU_PACK_A_16B_SAT,
189 QPU_PACK_A_8888_SAT,
190 QPU_PACK_A_8A_SAT,
191 QPU_PACK_A_8B_SAT,
192 QPU_PACK_A_8C_SAT,
193 QPU_PACK_A_8D_SAT,
194};
195
196enum qpu_unpack_r4 {
197 QPU_UNPACK_R4_NOP,
198 QPU_UNPACK_R4_F16A_TO_F32,
199 QPU_UNPACK_R4_F16B_TO_F32,
200 QPU_UNPACK_R4_8D_REP,
201 QPU_UNPACK_R4_8A,
202 QPU_UNPACK_R4_8B,
203 QPU_UNPACK_R4_8C,
204 QPU_UNPACK_R4_8D,
205};
206
207#define QPU_MASK(high, low) \
208 ((((uint64_t)1 << ((high) - (low) + 1)) - 1) << (low))
209
210#define QPU_GET_FIELD(word, field) \
211 ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))
212
213#define QPU_SIG_SHIFT 60
214#define QPU_SIG_MASK QPU_MASK(63, 60)
215
216#define QPU_UNPACK_SHIFT 57
217#define QPU_UNPACK_MASK QPU_MASK(59, 57)
218
219/**
220 * If set, the pack field means PACK_MUL or R4 packing, instead of normal
221 * regfile a packing.
222 */
223#define QPU_PM ((uint64_t)1 << 56)
224
225#define QPU_PACK_SHIFT 52
226#define QPU_PACK_MASK QPU_MASK(55, 52)
227
228#define QPU_COND_ADD_SHIFT 49
229#define QPU_COND_ADD_MASK QPU_MASK(51, 49)
230#define QPU_COND_MUL_SHIFT 46
231#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
232
233#define QPU_SF ((uint64_t)1 << 45)
234
235#define QPU_WADDR_ADD_SHIFT 38
236#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38)
237#define QPU_WADDR_MUL_SHIFT 32
238#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32)
239
240#define QPU_OP_MUL_SHIFT 29
241#define QPU_OP_MUL_MASK QPU_MASK(31, 29)
242
243#define QPU_RADDR_A_SHIFT 18
244#define QPU_RADDR_A_MASK QPU_MASK(23, 18)
245#define QPU_RADDR_B_SHIFT 12
246#define QPU_RADDR_B_MASK QPU_MASK(17, 12)
247#define QPU_SMALL_IMM_SHIFT 12
248#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12)
249
250#define QPU_ADD_A_SHIFT 9
251#define QPU_ADD_A_MASK QPU_MASK(11, 9)
252#define QPU_ADD_B_SHIFT 6
253#define QPU_ADD_B_MASK QPU_MASK(8, 6)
254#define QPU_MUL_A_SHIFT 3
255#define QPU_MUL_A_MASK QPU_MASK(5, 3)
256#define QPU_MUL_B_SHIFT 0
257#define QPU_MUL_B_MASK QPU_MASK(2, 0)
258
259#define QPU_WS ((uint64_t)1 << 44)
260
261#define QPU_OP_ADD_SHIFT 24
262#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
263
264#endif /* VC4_QPU_DEFINES_H */
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 9e4e904c668e..4e52a0a88551 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -154,7 +154,7 @@
154#define V3D_PCTRS14 0x006f4 154#define V3D_PCTRS14 0x006f4
155#define V3D_PCTR15 0x006f8 155#define V3D_PCTR15 0x006f8
156#define V3D_PCTRS15 0x006fc 156#define V3D_PCTRS15 0x006fc
157#define V3D_BGE 0x00f00 157#define V3D_DBGE 0x00f00
158#define V3D_FDBGO 0x00f04 158#define V3D_FDBGO 0x00f04
159#define V3D_FDBGB 0x00f08 159#define V3D_FDBGB 0x00f08
160#define V3D_FDBGR 0x00f0c 160#define V3D_FDBGR 0x00f0c
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
new file mode 100644
index 000000000000..8a2a312e2c1b
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -0,0 +1,634 @@
1/*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/**
25 * DOC: Render command list generation
26 *
27 * In the VC4 driver, render command list generation is performed by the
28 * kernel instead of userspace. We do this because validating a
29 * user-submitted command list is hard to get right and has high CPU overhead,
30 * while the number of valid configurations for render command lists is
31 * actually fairly low.
32 */
33
34#include "uapi/drm/vc4_drm.h"
35#include "vc4_drv.h"
36#include "vc4_packet.h"
37
38struct vc4_rcl_setup {
39 struct drm_gem_cma_object *color_read;
40 struct drm_gem_cma_object *color_write;
41 struct drm_gem_cma_object *zs_read;
42 struct drm_gem_cma_object *zs_write;
43 struct drm_gem_cma_object *msaa_color_write;
44 struct drm_gem_cma_object *msaa_zs_write;
45
46 struct drm_gem_cma_object *rcl;
47 u32 next_offset;
48};
49
50static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
51{
52 *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
53 setup->next_offset += 1;
54}
55
56static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
57{
58 *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
59 setup->next_offset += 2;
60}
61
62static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
63{
64 *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
65 setup->next_offset += 4;
66}
67
68/*
69 * Emits a no-op STORE_TILE_BUFFER_GENERAL.
70 *
71 * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
72 * some sort before another load is triggered.
73 */
74static void vc4_store_before_load(struct vc4_rcl_setup *setup)
75{
76 rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
77 rcl_u16(setup,
78 VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
79 VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
80 VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
81 VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
82 VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
83 rcl_u32(setup, 0); /* no address, since we're in None mode */
84}
85
86/*
87 * Calculates the physical address of the start of a tile in a RCL surface.
88 *
89 * Unlike the other load/store packets,
90 * VC4_PACKET_LOAD/STORE_FULL_RES_TILE_BUFFER don't look at the tile
91 * coordinates packet, and instead just store to the address given.
92 */
93static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
94 struct drm_gem_cma_object *bo,
95 struct drm_vc4_submit_rcl_surface *surf,
96 uint8_t x, uint8_t y)
97{
98 return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
99 (DIV_ROUND_UP(exec->args->width, 32) * y + x);
100}
101
102/*
103 * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
104 *
105 * The tile coordinates packet triggers a pending load if there is one, are
106 * used for clipping during rendering, and determine where loads/stores happen
107 * relative to their base address.
108 */
109static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
110 uint32_t x, uint32_t y)
111{
112 rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
113 rcl_u8(setup, x);
114 rcl_u8(setup, y);
115}
116
117static void emit_tile(struct vc4_exec_info *exec,
118 struct vc4_rcl_setup *setup,
119 uint8_t x, uint8_t y, bool first, bool last)
120{
121 struct drm_vc4_submit_cl *args = exec->args;
122 bool has_bin = args->bin_cl_size != 0;
123
124 /* Note that the load doesn't actually occur until the
125 * tile coords packet is processed, and only one load
126 * may be outstanding at a time.
127 */
128 if (setup->color_read) {
129 if (args->color_read.flags &
130 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
131 rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
132 rcl_u32(setup,
133 vc4_full_res_offset(exec, setup->color_read,
134 &args->color_read, x, y) |
135 VC4_LOADSTORE_FULL_RES_DISABLE_ZS);
136 } else {
137 rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
138 rcl_u16(setup, args->color_read.bits);
139 rcl_u32(setup, setup->color_read->paddr +
140 args->color_read.offset);
141 }
142 }
143
144 if (setup->zs_read) {
145 if (args->zs_read.flags &
146 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
147 rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
148 rcl_u32(setup,
149 vc4_full_res_offset(exec, setup->zs_read,
150 &args->zs_read, x, y) |
151 VC4_LOADSTORE_FULL_RES_DISABLE_COLOR);
152 } else {
153 if (setup->color_read) {
154 /* Exec previous load. */
155 vc4_tile_coordinates(setup, x, y);
156 vc4_store_before_load(setup);
157 }
158
159 rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
160 rcl_u16(setup, args->zs_read.bits);
161 rcl_u32(setup, setup->zs_read->paddr +
162 args->zs_read.offset);
163 }
164 }
165
166 /* Clipping depends on tile coordinates having been
167 * emitted, so we always need one here.
168 */
169 vc4_tile_coordinates(setup, x, y);
170
171 /* Wait for the binner before jumping to the first
172 * tile's lists.
173 */
174 if (first && has_bin)
175 rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);
176
177 if (has_bin) {
178 rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
179 rcl_u32(setup, (exec->tile_bo->paddr +
180 exec->tile_alloc_offset +
181 (y * exec->bin_tiles_x + x) * 32));
182 }
183
184 if (setup->msaa_color_write) {
185 bool last_tile_write = (!setup->msaa_zs_write &&
186 !setup->zs_write &&
187 !setup->color_write);
188 uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_ZS;
189
190 if (!last_tile_write)
191 bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
192 else if (last)
193 bits |= VC4_LOADSTORE_FULL_RES_EOF;
194 rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
195 rcl_u32(setup,
196 vc4_full_res_offset(exec, setup->msaa_color_write,
197 &args->msaa_color_write, x, y) |
198 bits);
199 }
200
201 if (setup->msaa_zs_write) {
202 bool last_tile_write = (!setup->zs_write &&
203 !setup->color_write);
204 uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_COLOR;
205
206 if (setup->msaa_color_write)
207 vc4_tile_coordinates(setup, x, y);
208 if (!last_tile_write)
209 bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
210 else if (last)
211 bits |= VC4_LOADSTORE_FULL_RES_EOF;
212 rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
213 rcl_u32(setup,
214 vc4_full_res_offset(exec, setup->msaa_zs_write,
215 &args->msaa_zs_write, x, y) |
216 bits);
217 }
218
219 if (setup->zs_write) {
220 bool last_tile_write = !setup->color_write;
221
222 if (setup->msaa_color_write || setup->msaa_zs_write)
223 vc4_tile_coordinates(setup, x, y);
224
225 rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
226 rcl_u16(setup, args->zs_write.bits |
227 (last_tile_write ?
228 0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
229 rcl_u32(setup,
230 (setup->zs_write->paddr + args->zs_write.offset) |
231 ((last && last_tile_write) ?
232 VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
233 }
234
235 if (setup->color_write) {
236 if (setup->msaa_color_write || setup->msaa_zs_write ||
237 setup->zs_write) {
238 vc4_tile_coordinates(setup, x, y);
239 }
240
241 if (last)
242 rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
243 else
244 rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
245 }
246}
247
248static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
249 struct vc4_rcl_setup *setup)
250{
251 struct drm_vc4_submit_cl *args = exec->args;
252 bool has_bin = args->bin_cl_size != 0;
253 uint8_t min_x_tile = args->min_x_tile;
254 uint8_t min_y_tile = args->min_y_tile;
255 uint8_t max_x_tile = args->max_x_tile;
256 uint8_t max_y_tile = args->max_y_tile;
257 uint8_t xtiles = max_x_tile - min_x_tile + 1;
258 uint8_t ytiles = max_y_tile - min_y_tile + 1;
259 uint8_t x, y;
260 uint32_t size, loop_body_size;
261
262 size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
263 loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
264
265 if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
266 size += VC4_PACKET_CLEAR_COLORS_SIZE +
267 VC4_PACKET_TILE_COORDINATES_SIZE +
268 VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
269 }
270
271 if (setup->color_read) {
272 if (args->color_read.flags &
273 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
274 loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
275 } else {
276 loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
277 }
278 }
279 if (setup->zs_read) {
280 if (args->zs_read.flags &
281 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
282 loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
283 } else {
284 if (setup->color_read &&
285 !(args->color_read.flags &
286 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) {
287 loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
288 loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
289 }
290 loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
291 }
292 }
293
294 if (has_bin) {
295 size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
296 loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
297 }
298
299 if (setup->msaa_color_write)
300 loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;
301 if (setup->msaa_zs_write)
302 loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;
303
304 if (setup->zs_write)
305 loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
306 if (setup->color_write)
307 loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;
308
309 /* We need a VC4_PACKET_TILE_COORDINATES in between each store. */
310 loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE *
311 ((setup->msaa_color_write != NULL) +
312 (setup->msaa_zs_write != NULL) +
313 (setup->color_write != NULL) +
314 (setup->zs_write != NULL) - 1);
315
316 size += xtiles * ytiles * loop_body_size;
317
318 setup->rcl = &vc4_bo_create(dev, size, true)->base;
319 if (!setup->rcl)
320 return -ENOMEM;
321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
322 &exec->unref_list);
323
324 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
325 rcl_u32(setup,
326 (setup->color_write ? (setup->color_write->paddr +
327 args->color_write.offset) :
328 0));
329 rcl_u16(setup, args->width);
330 rcl_u16(setup, args->height);
331 rcl_u16(setup, args->color_write.bits);
332
333 /* The tile buffer gets cleared when the previous tile is stored. If
334 * the clear values changed between frames, then the tile buffer has
335 * stale clear values in it, so we have to do a store in None mode (no
336 * writes) so that we trigger the tile buffer clear.
337 */
338 if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
339 rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
340 rcl_u32(setup, args->clear_color[0]);
341 rcl_u32(setup, args->clear_color[1]);
342 rcl_u32(setup, args->clear_z);
343 rcl_u8(setup, args->clear_s);
344
345 vc4_tile_coordinates(setup, 0, 0);
346
347 rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
348 rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
349 rcl_u32(setup, 0); /* no address, since we're in None mode */
350 }
351
352 for (y = min_y_tile; y <= max_y_tile; y++) {
353 for (x = min_x_tile; x <= max_x_tile; x++) {
354 bool first = (x == min_x_tile && y == min_y_tile);
355 bool last = (x == max_x_tile && y == max_y_tile);
356
357 emit_tile(exec, setup, x, y, first, last);
358 }
359 }
360
361 BUG_ON(setup->next_offset != size);
362 exec->ct1ca = setup->rcl->paddr;
363 exec->ct1ea = setup->rcl->paddr + setup->next_offset;
364
365 return 0;
366}
367
368static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
369 struct drm_gem_cma_object *obj,
370 struct drm_vc4_submit_rcl_surface *surf)
371{
372 struct drm_vc4_submit_cl *args = exec->args;
373 u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
374
375 if (surf->offset > obj->base.size) {
376 DRM_ERROR("surface offset %d > BO size %zd\n",
377 surf->offset, obj->base.size);
378 return -EINVAL;
379 }
380
381 if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
382 render_tiles_stride * args->max_y_tile + args->max_x_tile) {
383 DRM_ERROR("MSAA tile %d, %d out of bounds "
384 "(bo size %zd, offset %d).\n",
385 args->max_x_tile, args->max_y_tile,
386 obj->base.size,
387 surf->offset);
388 return -EINVAL;
389 }
390
391 return 0;
392}
393
394static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
395 struct drm_gem_cma_object **obj,
396 struct drm_vc4_submit_rcl_surface *surf)
397{
398 if (surf->flags != 0 || surf->bits != 0) {
399 DRM_ERROR("MSAA surface had nonzero flags/bits\n");
400 return -EINVAL;
401 }
402
403 if (surf->hindex == ~0)
404 return 0;
405
406 *obj = vc4_use_bo(exec, surf->hindex);
407 if (!*obj)
408 return -EINVAL;
409
410 if (surf->offset & 0xf) {
411 DRM_ERROR("MSAA write must be 16b aligned.\n");
412 return -EINVAL;
413 }
414
415 return vc4_full_res_bounds_check(exec, *obj, surf);
416}
417
418static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
419 struct drm_gem_cma_object **obj,
420 struct drm_vc4_submit_rcl_surface *surf)
421{
422 uint8_t tiling = VC4_GET_FIELD(surf->bits,
423 VC4_LOADSTORE_TILE_BUFFER_TILING);
424 uint8_t buffer = VC4_GET_FIELD(surf->bits,
425 VC4_LOADSTORE_TILE_BUFFER_BUFFER);
426 uint8_t format = VC4_GET_FIELD(surf->bits,
427 VC4_LOADSTORE_TILE_BUFFER_FORMAT);
428 int cpp;
429 int ret;
430
431 if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
432 DRM_ERROR("Extra flags set\n");
433 return -EINVAL;
434 }
435
436 if (surf->hindex == ~0)
437 return 0;
438
439 *obj = vc4_use_bo(exec, surf->hindex);
440 if (!*obj)
441 return -EINVAL;
442
443 if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
444 if (surf == &exec->args->zs_write) {
445 DRM_ERROR("general zs write may not be a full-res.\n");
446 return -EINVAL;
447 }
448
449 if (surf->bits != 0) {
450 DRM_ERROR("load/store general bits set with "
451 "full res load/store.\n");
452 return -EINVAL;
453 }
454
455 ret = vc4_full_res_bounds_check(exec, *obj, surf);
456 if (!ret)
457 return ret;
458
459 return 0;
460 }
461
462 if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
463 VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
464 VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
465 DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
466 surf->bits);
467 return -EINVAL;
468 }
469
470 if (tiling > VC4_TILING_FORMAT_LT) {
471 DRM_ERROR("Bad tiling format\n");
472 return -EINVAL;
473 }
474
475 if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
476 if (format != 0) {
477 DRM_ERROR("No color format should be set for ZS\n");
478 return -EINVAL;
479 }
480 cpp = 4;
481 } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
482 switch (format) {
483 case VC4_LOADSTORE_TILE_BUFFER_BGR565:
484 case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
485 cpp = 2;
486 break;
487 case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
488 cpp = 4;
489 break;
490 default:
491 DRM_ERROR("Bad tile buffer format\n");
492 return -EINVAL;
493 }
494 } else {
495 DRM_ERROR("Bad load/store buffer %d.\n", buffer);
496 return -EINVAL;
497 }
498
499 if (surf->offset & 0xf) {
500 DRM_ERROR("load/store buffer must be 16b aligned.\n");
501 return -EINVAL;
502 }
503
504 if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
505 exec->args->width, exec->args->height, cpp)) {
506 return -EINVAL;
507 }
508
509 return 0;
510}
511
512static int
513vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
514 struct vc4_rcl_setup *setup,
515 struct drm_gem_cma_object **obj,
516 struct drm_vc4_submit_rcl_surface *surf)
517{
518 uint8_t tiling = VC4_GET_FIELD(surf->bits,
519 VC4_RENDER_CONFIG_MEMORY_FORMAT);
520 uint8_t format = VC4_GET_FIELD(surf->bits,
521 VC4_RENDER_CONFIG_FORMAT);
522 int cpp;
523
524 if (surf->flags != 0) {
525 DRM_ERROR("No flags supported on render config.\n");
526 return -EINVAL;
527 }
528
529 if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
530 VC4_RENDER_CONFIG_FORMAT_MASK |
531 VC4_RENDER_CONFIG_MS_MODE_4X |
532 VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
533 DRM_ERROR("Unknown bits in render config: 0x%04x\n",
534 surf->bits);
535 return -EINVAL;
536 }
537
538 if (surf->hindex == ~0)
539 return 0;
540
541 *obj = vc4_use_bo(exec, surf->hindex);
542 if (!*obj)
543 return -EINVAL;
544
545 if (tiling > VC4_TILING_FORMAT_LT) {
546 DRM_ERROR("Bad tiling format\n");
547 return -EINVAL;
548 }
549
550 switch (format) {
551 case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
552 case VC4_RENDER_CONFIG_FORMAT_BGR565:
553 cpp = 2;
554 break;
555 case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
556 cpp = 4;
557 break;
558 default:
559 DRM_ERROR("Bad tile buffer format\n");
560 return -EINVAL;
561 }
562
563 if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
564 exec->args->width, exec->args->height, cpp)) {
565 return -EINVAL;
566 }
567
568 return 0;
569}
570
571int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
572{
573 struct vc4_rcl_setup setup = {0};
574 struct drm_vc4_submit_cl *args = exec->args;
575 bool has_bin = args->bin_cl_size != 0;
576 int ret;
577
578 if (args->min_x_tile > args->max_x_tile ||
579 args->min_y_tile > args->max_y_tile) {
580 DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
581 args->min_x_tile, args->min_y_tile,
582 args->max_x_tile, args->max_y_tile);
583 return -EINVAL;
584 }
585
586 if (has_bin &&
587 (args->max_x_tile > exec->bin_tiles_x ||
588 args->max_y_tile > exec->bin_tiles_y)) {
589 DRM_ERROR("Render tiles (%d,%d) outside of bin config "
590 "(%d,%d)\n",
591 args->max_x_tile, args->max_y_tile,
592 exec->bin_tiles_x, exec->bin_tiles_y);
593 return -EINVAL;
594 }
595
596 ret = vc4_rcl_render_config_surface_setup(exec, &setup,
597 &setup.color_write,
598 &args->color_write);
599 if (ret)
600 return ret;
601
602 ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
603 if (ret)
604 return ret;
605
606 ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
607 if (ret)
608 return ret;
609
610 ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
611 if (ret)
612 return ret;
613
614 ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_color_write,
615 &args->msaa_color_write);
616 if (ret)
617 return ret;
618
619 ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_zs_write,
620 &args->msaa_zs_write);
621 if (ret)
622 return ret;
623
624 /* We shouldn't even have the job submitted to us if there's no
625 * surface to write out.
626 */
627 if (!setup.color_write && !setup.zs_write &&
628 !setup.msaa_color_write && !setup.msaa_zs_write) {
629 DRM_ERROR("RCL requires color or Z/S write\n");
630 return -EINVAL;
631 }
632
633 return vc4_create_rcl_bo(dev, exec, &setup);
634}
diff --git a/drivers/gpu/drm/vc4/vc4_trace.h b/drivers/gpu/drm/vc4/vc4_trace.h
new file mode 100644
index 000000000000..ad7b1ea720c2
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
10#define _VC4_TRACE_H_
11
12#include <linux/stringify.h>
13#include <linux/types.h>
14#include <linux/tracepoint.h>
15
16#undef TRACE_SYSTEM
17#define TRACE_SYSTEM vc4
18#define TRACE_INCLUDE_FILE vc4_trace
19
20TRACE_EVENT(vc4_wait_for_seqno_begin,
21 TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
22 TP_ARGS(dev, seqno, timeout),
23
24 TP_STRUCT__entry(
25 __field(u32, dev)
26 __field(u64, seqno)
27 __field(u64, timeout)
28 ),
29
30 TP_fast_assign(
31 __entry->dev = dev->primary->index;
32 __entry->seqno = seqno;
33 __entry->timeout = timeout;
34 ),
35
36 TP_printk("dev=%u, seqno=%llu, timeout=%llu",
37 __entry->dev, __entry->seqno, __entry->timeout)
38);
39
40TRACE_EVENT(vc4_wait_for_seqno_end,
41 TP_PROTO(struct drm_device *dev, uint64_t seqno),
42 TP_ARGS(dev, seqno),
43
44 TP_STRUCT__entry(
45 __field(u32, dev)
46 __field(u64, seqno)
47 ),
48
49 TP_fast_assign(
50 __entry->dev = dev->primary->index;
51 __entry->seqno = seqno;
52 ),
53
54 TP_printk("dev=%u, seqno=%llu",
55 __entry->dev, __entry->seqno)
56);
57
58#endif /* _VC4_TRACE_H_ */
59
60/* This part must be outside protection */
61#undef TRACE_INCLUDE_PATH
62#define TRACE_INCLUDE_PATH .
63#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/vc4/vc4_trace_points.c b/drivers/gpu/drm/vc4/vc4_trace_points.c
new file mode 100644
index 000000000000..e6278f25716b
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace_points.c
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "vc4_drv.h"
10
11#ifndef __CHECKER__
12#define CREATE_TRACE_POINTS
13#include "vc4_trace.h"
14#endif
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
new file mode 100644
index 000000000000..424d515ffcda
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include "linux/component.h"
20#include "vc4_drv.h"
21#include "vc4_regs.h"
22
23#ifdef CONFIG_DEBUG_FS
/* Table of V3D register offsets and their names, used only by the
 * debugfs register dump below.  REGDEF() stringifies the register
 * macro so the dump shows symbolic names next to offsets.
 */
24#define REGDEF(reg) { reg, #reg }
25static const struct {
26 uint32_t reg;
27 const char *name;
28} vc4_reg_defs[] = {
29 REGDEF(V3D_IDENT0),
30 REGDEF(V3D_IDENT1),
31 REGDEF(V3D_IDENT2),
32 REGDEF(V3D_SCRATCH),
33 REGDEF(V3D_L2CACTL),
34 REGDEF(V3D_SLCACTL),
35 REGDEF(V3D_INTCTL),
36 REGDEF(V3D_INTENA),
37 REGDEF(V3D_INTDIS),
38 REGDEF(V3D_CT0CS),
39 REGDEF(V3D_CT1CS),
40 REGDEF(V3D_CT0EA),
41 REGDEF(V3D_CT1EA),
42 REGDEF(V3D_CT0CA),
43 REGDEF(V3D_CT1CA),
44 REGDEF(V3D_CT00RA0),
45 REGDEF(V3D_CT01RA0),
46 REGDEF(V3D_CT0LC),
47 REGDEF(V3D_CT1LC),
48 REGDEF(V3D_CT0PC),
49 REGDEF(V3D_CT1PC),
50 REGDEF(V3D_PCS),
51 REGDEF(V3D_BFC),
52 REGDEF(V3D_RFC),
53 REGDEF(V3D_BPCA),
54 REGDEF(V3D_BPCS),
55 REGDEF(V3D_BPOA),
56 REGDEF(V3D_BPOS),
57 REGDEF(V3D_BXCF),
58 REGDEF(V3D_SQRSV0),
59 REGDEF(V3D_SQRSV1),
60 REGDEF(V3D_SQCNTL),
61 REGDEF(V3D_SRQPC),
62 REGDEF(V3D_SRQUA),
63 REGDEF(V3D_SRQUL),
64 REGDEF(V3D_SRQCS),
65 REGDEF(V3D_VPACNTL),
66 REGDEF(V3D_VPMBASE),
67 REGDEF(V3D_PCTRC),
68 REGDEF(V3D_PCTRE),
69 REGDEF(V3D_PCTR0),
70 REGDEF(V3D_PCTRS0),
71 REGDEF(V3D_PCTR1),
72 REGDEF(V3D_PCTRS1),
73 REGDEF(V3D_PCTR2),
74 REGDEF(V3D_PCTRS2),
75 REGDEF(V3D_PCTR3),
76 REGDEF(V3D_PCTRS3),
77 REGDEF(V3D_PCTR4),
78 REGDEF(V3D_PCTRS4),
79 REGDEF(V3D_PCTR5),
80 REGDEF(V3D_PCTRS5),
81 REGDEF(V3D_PCTR6),
82 REGDEF(V3D_PCTRS6),
83 REGDEF(V3D_PCTR7),
84 REGDEF(V3D_PCTRS7),
85 REGDEF(V3D_PCTR8),
86 REGDEF(V3D_PCTRS8),
87 REGDEF(V3D_PCTR9),
88 REGDEF(V3D_PCTRS9),
89 REGDEF(V3D_PCTR10),
90 REGDEF(V3D_PCTRS10),
91 REGDEF(V3D_PCTR11),
92 REGDEF(V3D_PCTRS11),
93 REGDEF(V3D_PCTR12),
94 REGDEF(V3D_PCTRS12),
95 REGDEF(V3D_PCTR13),
96 REGDEF(V3D_PCTRS13),
97 REGDEF(V3D_PCTR14),
98 REGDEF(V3D_PCTRS14),
99 REGDEF(V3D_PCTR15),
100 REGDEF(V3D_PCTRS15),
101 REGDEF(V3D_DBGE),
102 REGDEF(V3D_FDBGO),
103 REGDEF(V3D_FDBGB),
104 REGDEF(V3D_FDBGR),
105 REGDEF(V3D_FDBGS),
106 REGDEF(V3D_ERRSTAT),
107};
108
109int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
110{
111 struct drm_info_node *node = (struct drm_info_node *)m->private;
112 struct drm_device *dev = node->minor->dev;
113 struct vc4_dev *vc4 = to_vc4_dev(dev);
114 int i;
115
116 for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
117 seq_printf(m, "%s (0x%04x): 0x%08x\n",
118 vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
119 V3D_READ(vc4_reg_defs[i].reg));
120 }
121
122 return 0;
123}
124
125int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
126{
127 struct drm_info_node *node = (struct drm_info_node *)m->private;
128 struct drm_device *dev = node->minor->dev;
129 struct vc4_dev *vc4 = to_vc4_dev(dev);
130 uint32_t ident1 = V3D_READ(V3D_IDENT1);
131 uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
132 uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
133 uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
134
135 seq_printf(m, "Revision: %d\n",
136 VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
137 seq_printf(m, "Slices: %d\n", nslc);
138 seq_printf(m, "TMUs: %d\n", nslc * tups);
139 seq_printf(m, "QPUs: %d\n", nslc * qups);
140 seq_printf(m, "Semaphores: %d\n",
141 VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
142
143 return 0;
144}
145#endif /* CONFIG_DEBUG_FS */
146
147/*
148 * Asks the firmware to turn on power to the V3D engine.
149 *
150 * This may be doable with just the clocks interface, though this
151 * packet does some other register setup from the firmware, too.
152 */
153int
154vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
155{
156 if (on)
157 return pm_generic_poweroff(&vc4->v3d->pdev->dev);
158 else
159 return pm_generic_resume(&vc4->v3d->pdev->dev);
160}
161
/* One-time V3D hardware setup run at component bind time.
 * Note: @vc4 is only referenced through the V3D_WRITE() macro.
 */
162static void vc4_v3d_init_hw(struct drm_device *dev)
163{
164 struct vc4_dev *vc4 = to_vc4_dev(dev);
165
166 /* Take all the memory that would have been reserved for user
167 * QPU programs, since we don't have an interface for running
168 * them, anyway.
169 */
170 V3D_WRITE(V3D_VPMBASE, 0);
171}
172
173static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
174{
175 struct platform_device *pdev = to_platform_device(dev);
176 struct drm_device *drm = dev_get_drvdata(master);
177 struct vc4_dev *vc4 = to_vc4_dev(drm);
178 struct vc4_v3d *v3d = NULL;
179 int ret;
180
181 v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
182 if (!v3d)
183 return -ENOMEM;
184
185 v3d->pdev = pdev;
186
187 v3d->regs = vc4_ioremap_regs(pdev, 0);
188 if (IS_ERR(v3d->regs))
189 return PTR_ERR(v3d->regs);
190
191 vc4->v3d = v3d;
192
193 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
194 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
195 V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
196 return -EINVAL;
197 }
198
199 /* Reset the binner overflow address/size at setup, to be sure
200 * we don't reuse an old one.
201 */
202 V3D_WRITE(V3D_BPOA, 0);
203 V3D_WRITE(V3D_BPOS, 0);
204
205 vc4_v3d_init_hw(drm);
206
207 ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
208 if (ret) {
209 DRM_ERROR("Failed to install IRQ handler\n");
210 return ret;
211 }
212
213 return 0;
214}
215
/* Component unbind callback: tears down the IRQ handler, clears the
 * binner overflow registers, and drops the driver's v3d pointer.
 * The v3d struct itself is devm-allocated, so no explicit free here.
 */
216static void vc4_v3d_unbind(struct device *dev, struct device *master,
217 void *data)
218{
219 struct drm_device *drm = dev_get_drvdata(master);
220 struct vc4_dev *vc4 = to_vc4_dev(drm);
221
222 drm_irq_uninstall(drm);
223
224 /* Disable the binner's overflow memory address, so the next
225 * driver probe (if any) doesn't try to reuse our old
226 * allocation.
227 */
228 V3D_WRITE(V3D_BPOA, 0);
229 V3D_WRITE(V3D_BPOS, 0);
230
231 vc4->v3d = NULL;
232}
233
/* Component framework glue: the V3D engine is a sub-device of the
 * composite vc4 DRM device, bound/unbound via the ops below.
 */
234static const struct component_ops vc4_v3d_ops = {
235 .bind = vc4_v3d_bind,
236 .unbind = vc4_v3d_unbind,
237};
238
/* Platform probe: just register with the component framework; the
 * real setup happens in vc4_v3d_bind() when the master binds.
 */
239static int vc4_v3d_dev_probe(struct platform_device *pdev)
240{
241 return component_add(&pdev->dev, &vc4_v3d_ops);
242}
243
244static int vc4_v3d_dev_remove(struct platform_device *pdev)
245{
246 component_del(&pdev->dev, &vc4_v3d_ops);
247 return 0;
248}
249
/* Matches the "brcm,vc4-v3d" DT node for the 3D engine. */
250static const struct of_device_id vc4_v3d_dt_match[] = {
251 { .compatible = "brcm,vc4-v3d" },
252 {}
253};
254
255struct platform_driver vc4_v3d_driver = {
256 .probe = vc4_v3d_dev_probe,
257 .remove = vc4_v3d_dev_remove,
258 .driver = {
259 .name = "vc4_v3d",
260 .of_match_table = vc4_v3d_dt_match,
261 },
262};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
new file mode 100644
index 000000000000..0fb5b994b9dd
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -0,0 +1,900 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/**
25 * Command list validator for VC4.
26 *
27 * The VC4 has no IOMMU between it and system memory. So, a user with
28 * access to execute command lists could escalate privilege by
29 * overwriting system memory (drawing to it as a framebuffer) or
30 * reading system memory it shouldn't (reading it as a texture, or
31 * uniform data, or vertex data).
32 *
33 * This validates command lists to ensure that all accesses are within
34 * the bounds of the GEM objects referenced. It explicitly whitelists
35 * packets, and looks at the offsets in any address fields to make
36 * sure they're constrained within the BOs they reference.
37 *
38 * Note that because of the validation that's happening anyway, this
39 * is where GEM relocation processing happens.
40 */
41
42#include "uapi/drm/vc4_drm.h"
43#include "vc4_drv.h"
44#include "vc4_packet.h"
45
/* Shared parameter list for the per-packet validate_*() callbacks:
 * @exec is the job state, @validated points into the kernel copy the
 * hardware will execute, @untrusted points into the userspace CL.
 * Both pointers are pre-incremented past the packet opcode byte.
 */
46#define VALIDATE_ARGS \
47 struct vc4_exec_info *exec, \
48 void *validated, \
49 void *untrusted
50
/** Return the width in pixels of a 64-byte microtile for @cpp bytes/pixel. */
static uint32_t
utile_width(int cpp)
{
	if (cpp == 1 || cpp == 2)
		return 8;
	if (cpp == 4)
		return 4;
	if (cpp == 8)
		return 2;

	DRM_ERROR("unknown cpp: %d\n", cpp);
	return 1;
}
68
/** Return the height in pixels of a 64-byte microtile for @cpp bytes/pixel. */
static uint32_t
utile_height(int cpp)
{
	if (cpp == 1)
		return 8;
	if (cpp == 2 || cpp == 4 || cpp == 8)
		return 4;

	DRM_ERROR("unknown cpp: %d\n", cpp);
	return 1;
}
85
/**
 * The texture unit decides what tiling format a particular miplevel is
 * using this function, so we lay out our miptrees accordingly: a level
 * is "LT" when either dimension fits within 4 microtiles.
 */
static bool
size_is_lt(uint32_t width, uint32_t height, int cpp)
{
	uint32_t lt_w = 4 * utile_width(cpp);
	uint32_t lt_h = 4 * utile_height(cpp);

	return width <= lt_w || height <= lt_h;
}
96
/* Look up BO @hindex in the job's handle table and return it for use
 * as a data buffer.  Returns NULL (after logging) if the index is out
 * of range or the BO holds a validated shader — shader BOs must never
 * be readable/writable as ordinary data.
 */
97struct drm_gem_cma_object *
98vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
99{
100 struct drm_gem_cma_object *obj;
101 struct vc4_bo *bo;
102
103 if (hindex >= exec->bo_count) {
104 DRM_ERROR("BO index %d greater than BO count %d\n",
105 hindex, exec->bo_count);
106 return NULL;
107 }
108 obj = exec->bo[hindex];
109 bo = to_vc4_bo(&obj->base);
110
111 if (bo->validated_shader) {
112 DRM_ERROR("Trying to use shader BO as something other than "
113 "a shader\n");
114 return NULL;
115 }
116
117 return obj;
118}
119
120static struct drm_gem_cma_object *
121vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
122{
123 return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
124}
125
/* Returns true iff the packet currently being validated sits at byte
 * offset @pos of the userspace bin CL — used to require specific
 * packets at specific end-of-CL positions.
 */
126static bool
127validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos)
128{
129 /* Note that the untrusted pointer passed to these functions is
130 * incremented past the packet byte.
131 */
132 return (untrusted - 1 == exec->bin_u + pos);
133}
134
/* Size in bytes of a GL shader record, decoded from the low bits of
 * its CL reference: bits 0-2 are the attribute count (0 means 8),
 * bit 3 selects the extended record layout.
 */
static uint32_t
gl_shader_rec_size(uint32_t pointer_bits)
{
	uint32_t attribute_count = pointer_bits & 7;
	bool extended = pointer_bits & 8;

	if (!attribute_count)
		attribute_count = 8;

	return extended ? 100 + attribute_count * 4
			: 36 + attribute_count * 8;
}
149
150bool
151vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
152 uint32_t offset, uint8_t tiling_format,
153 uint32_t width, uint32_t height, uint8_t cpp)
154{
155 uint32_t aligned_width, aligned_height, stride, size;
156 uint32_t utile_w = utile_width(cpp);
157 uint32_t utile_h = utile_height(cpp);
158
159 /* The shaded vertex format stores signed 12.4 fixed point
160 * (-2048,2047) offsets from the viewport center, so we should
161 * never have a render target larger than 4096. The texture
162 * unit can only sample from 2048x2048, so it's even more
163 * restricted. This lets us avoid worrying about overflow in
164 * our math.
165 */
166 if (width > 4096 || height > 4096) {
167 DRM_ERROR("Surface dimesions (%d,%d) too large", width, height);
168 return false;
169 }
170
171 switch (tiling_format) {
172 case VC4_TILING_FORMAT_LINEAR:
173 aligned_width = round_up(width, utile_w);
174 aligned_height = height;
175 break;
176 case VC4_TILING_FORMAT_T:
177 aligned_width = round_up(width, utile_w * 8);
178 aligned_height = round_up(height, utile_h * 8);
179 break;
180 case VC4_TILING_FORMAT_LT:
181 aligned_width = round_up(width, utile_w);
182 aligned_height = round_up(height, utile_h);
183 break;
184 default:
185 DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
186 return false;
187 }
188
189 stride = aligned_width * cpp;
190 size = stride * aligned_height;
191
192 if (size + offset < size ||
193 size + offset > fbo->base.size) {
194 DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
195 width, height,
196 aligned_width, aligned_height,
197 size, offset, fbo->base.size);
198 return false;
199 }
200
201 return true;
202}
203
/* VC4_PACKET_FLUSH handler: only legal as the very last byte of the
 * bin CL (see vc4_validate_bin_cl() for why the CL must end with
 * INCREMENT_SEMAPHORE + FLUSH).
 */
204static int
205validate_flush(VALIDATE_ARGS)
206{
207 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
208 DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
209 return -EINVAL;
210 }
211 exec->found_flush = true;
212
213 return 0;
214}
215
/* VC4_PACKET_START_TILE_BINNING handler: must appear exactly once,
 * and only after TILE_BINNING_MODE_CONFIG has set up the binner.
 */
216static int
217validate_start_tile_binning(VALIDATE_ARGS)
218{
219 if (exec->found_start_tile_binning_packet) {
220 DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
221 return -EINVAL;
222 }
223 exec->found_start_tile_binning_packet = true;
224
225 if (!exec->found_tile_binning_mode_config_packet) {
226 DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
227 return -EINVAL;
228 }
229
230 return 0;
231}
232
/* VC4_PACKET_INCREMENT_SEMAPHORE handler: only legal as the
 * second-to-last packet byte (immediately before the final FLUSH),
 * since the semaphore increment is what releases the render CL.
 */
233static int
234validate_increment_semaphore(VALIDATE_ARGS)
235{
236 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
237 DRM_ERROR("Bin CL must end with "
238 "VC4_PACKET_INCREMENT_SEMAPHORE\n");
239 return -EINVAL;
240 }
241 exec->found_increment_semaphore_packet = true;
242
243 return 0;
244}
245
/* VC4_PACKET_GL_INDEXED_PRIMITIVE handler: bounds-checks the index
 * buffer access and relocates its address, and records the largest
 * index used so the shader rec's vertex buffers can be checked later.
 */
246static int
247validate_indexed_prim_list(VALIDATE_ARGS)
248{
249 struct drm_gem_cma_object *ib;
250 uint32_t length = *(uint32_t *)(untrusted + 1);
251 uint32_t offset = *(uint32_t *)(untrusted + 5);
252 uint32_t max_index = *(uint32_t *)(untrusted + 9);
253 uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
254 struct vc4_shader_state *shader_state;
255
256 /* Check overflow condition */
257 if (exec->shader_state_count == 0) {
258 DRM_ERROR("shader state must precede primitives\n");
259 return -EINVAL;
260 }
261 shader_state = &exec->shader_state[exec->shader_state_count - 1];
262
263 if (max_index > shader_state->max_index)
264 shader_state->max_index = max_index;
265
266 ib = vc4_use_handle(exec, 0);
267 if (!ib)
268 return -EINVAL;
269
/* Division form avoids overflow of offset + length * index_size. */
270 if (offset > ib->base.size ||
271 (ib->base.size - offset) / index_size < length) {
272 DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
273 offset, length, index_size, ib->base.size);
274 return -EINVAL;
275 }
276
277 *(uint32_t *)(validated + 5) = ib->paddr + offset;
278
279 return 0;
280}
281
/* VC4_PACKET_GL_ARRAY_PRIMITIVE handler: no relocation needed (the
 * vertex data comes from the shader rec's attribute BOs), but the
 * highest vertex index touched is recorded for later bounds checks.
 */
282static int
283validate_gl_array_primitive(VALIDATE_ARGS)
284{
285 uint32_t length = *(uint32_t *)(untrusted + 1);
286 uint32_t base_index = *(uint32_t *)(untrusted + 5);
287 uint32_t max_index;
288 struct vc4_shader_state *shader_state;
289
290 /* Check overflow condition */
291 if (exec->shader_state_count == 0) {
292 DRM_ERROR("shader state must precede primitives\n");
293 return -EINVAL;
294 }
295 shader_state = &exec->shader_state[exec->shader_state_count - 1];
296
/* Detects 32-bit wraparound of base_index + length. */
297 if (length + base_index < length) {
298 DRM_ERROR("primitive vertex count overflow\n");
299 return -EINVAL;
300 }
301 max_index = length + base_index - 1;
302
303 if (max_index > shader_state->max_index)
304 shader_state->max_index = max_index;
305
306 return 0;
307}
308
/* VC4_PACKET_GL_SHADER_STATE handler: records the shader state slot
 * and rewrites the packet's pointer to the kernel's validated shader
 * rec area.  The low 4 bits of the reference encode attribute count
 * and the extended-record flag (see gl_shader_rec_size()); all higher
 * bits must be zero since the kernel chooses the actual address.
 */
309static int
310validate_gl_shader_state(VALIDATE_ARGS)
311{
312 uint32_t i = exec->shader_state_count++;
313
314 if (i >= exec->shader_state_size) {
315 DRM_ERROR("More requests for shader states than declared\n");
316 return -EINVAL;
317 }
318
319 exec->shader_state[i].addr = *(uint32_t *)untrusted;
320 exec->shader_state[i].max_index = 0;
321
322 if (exec->shader_state[i].addr & ~0xf) {
323 DRM_ERROR("high bits set in GL shader rec reference\n");
324 return -EINVAL;
325 }
326
327 *(uint32_t *)validated = (exec->shader_rec_p +
328 exec->shader_state[i].addr);
329
/* Advance past this record; the hardware requires 16-byte alignment. */
330 exec->shader_rec_p +=
331 roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
332
333 return 0;
334}
335
336static int
337validate_tile_binning_config(VALIDATE_ARGS)
338{
339 struct drm_device *dev = exec->exec_bo->base.dev;
340 struct vc4_bo *tile_bo;
341 uint8_t flags;
342 uint32_t tile_state_size, tile_alloc_size;
343 uint32_t tile_count;
344
345 if (exec->found_tile_binning_mode_config_packet) {
346 DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
347 return -EINVAL;
348 }
349 exec->found_tile_binning_mode_config_packet = true;
350
351 exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
352 exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
353 tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
354 flags = *(uint8_t *)(untrusted + 14);
355
356 if (exec->bin_tiles_x == 0 ||
357 exec->bin_tiles_y == 0) {
358 DRM_ERROR("Tile binning config of %dx%d too small\n",
359 exec->bin_tiles_x, exec->bin_tiles_y);
360 return -EINVAL;
361 }
362
363 if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
364 VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
365 DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
366 return -EINVAL;
367 }
368
369 /* The tile state data array is 48 bytes per tile, and we put it at
370 * the start of a BO containing both it and the tile alloc.
371 */
372 tile_state_size = 48 * tile_count;
373
374 /* Since the tile alloc array will follow us, align. */
375 exec->tile_alloc_offset = roundup(tile_state_size, 4096);
376
377 *(uint8_t *)(validated + 14) =
378 ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
379 VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
380 VC4_BIN_CONFIG_AUTO_INIT_TSDA |
381 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
382 VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
383 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
384 VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
385
386 /* Initial block size. */
387 tile_alloc_size = 32 * tile_count;
388
389 /*
390 * The initial allocation gets rounded to the next 256 bytes before
391 * the hardware starts fulfilling further allocations.
392 */
393 tile_alloc_size = roundup(tile_alloc_size, 256);
394
395 /* Add space for the extra allocations. This is what gets used first,
396 * before overflow memory. It must have at least 4096 bytes, but we
397 * want to avoid overflow memory usage if possible.
398 */
399 tile_alloc_size += 1024 * 1024;
400
401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
402 true);
403 exec->tile_bo = &tile_bo->base;
404 if (!exec->tile_bo)
405 return -ENOMEM;
406 list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407
408 /* tile alloc address. */
409 *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
410 exec->tile_alloc_offset);
411 /* tile alloc size. */
412 *(uint32_t *)(validated + 4) = tile_alloc_size;
413 /* tile state address. */
414 *(uint32_t *)(validated + 8) = exec->tile_bo->paddr;
415
416 return 0;
417}
418
/* VC4_PACKET_GEM_HANDLES handler: stashes the packet's BO handle
 * indices for later vc4_use_handle() lookups.  This packet is consumed
 * by validation and never copied into the hardware CL.
 */
419static int
420validate_gem_handles(VALIDATE_ARGS)
421{
422 memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
423 return 0;
424}
425
/* Whitelist of bin CL packets: length in bytes, name for diagnostics,
 * and an optional validation/relocation callback.  Any opcode without
 * an entry (NULL name) is rejected by vc4_validate_bin_cl().
 */
426#define VC4_DEFINE_PACKET(packet, func) \
427 [packet] = { packet ## _SIZE, #packet, func }
428
429static const struct cmd_info {
430 uint16_t len;
431 const char *name;
432 int (*func)(struct vc4_exec_info *exec, void *validated,
433 void *untrusted);
434} cmd_info[] = {
435 VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL),
436 VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL),
437 VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush),
438 VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL),
439 VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING,
440 validate_start_tile_binning),
441 VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE,
442 validate_increment_semaphore),
443
444 VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE,
445 validate_indexed_prim_list),
446 VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE,
447 validate_gl_array_primitive),
448
449 VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL),
450
451 VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state),
452
453 VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL),
454 VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL),
455 VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL),
456 VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL),
457 VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL),
458 VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL),
459 VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL),
460 VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL),
461 VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL),
462 /* Note: The docs say this was also 105, but it was 106 in the
463 * initial userland code drop.
464 */
465 VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL),
466
467 VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG,
468 validate_tile_binning_config),
469
470 VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles),
471};
472
473int
474vc4_validate_bin_cl(struct drm_device *dev,
475 void *validated,
476 void *unvalidated,
477 struct vc4_exec_info *exec)
478{
479 uint32_t len = exec->args->bin_cl_size;
480 uint32_t dst_offset = 0;
481 uint32_t src_offset = 0;
482
483 while (src_offset < len) {
484 void *dst_pkt = validated + dst_offset;
485 void *src_pkt = unvalidated + src_offset;
486 u8 cmd = *(uint8_t *)src_pkt;
487 const struct cmd_info *info;
488
489 if (cmd >= ARRAY_SIZE(cmd_info)) {
490 DRM_ERROR("0x%08x: packet %d out of bounds\n",
491 src_offset, cmd);
492 return -EINVAL;
493 }
494
495 info = &cmd_info[cmd];
496 if (!info->name) {
497 DRM_ERROR("0x%08x: packet %d invalid\n",
498 src_offset, cmd);
499 return -EINVAL;
500 }
501
502 if (src_offset + info->len > len) {
503 DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
504 "exceeds bounds (0x%08x)\n",
505 src_offset, cmd, info->name, info->len,
506 src_offset + len);
507 return -EINVAL;
508 }
509
510 if (cmd != VC4_PACKET_GEM_HANDLES)
511 memcpy(dst_pkt, src_pkt, info->len);
512
513 if (info->func && info->func(exec,
514 dst_pkt + 1,
515 src_pkt + 1)) {
516 DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n",
517 src_offset, cmd, info->name);
518 return -EINVAL;
519 }
520
521 src_offset += info->len;
522 /* GEM handle loading doesn't produce HW packets. */
523 if (cmd != VC4_PACKET_GEM_HANDLES)
524 dst_offset += info->len;
525
526 /* When the CL hits halt, it'll stop reading anything else. */
527 if (cmd == VC4_PACKET_HALT)
528 break;
529 }
530
531 exec->ct0ea = exec->ct0ca + dst_offset;
532
533 if (!exec->found_start_tile_binning_packet) {
534 DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
535 return -EINVAL;
536 }
537
538 /* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH. The
539 * semaphore is used to trigger the render CL to start up, and the
540 * FLUSH is what caps the bin lists with
541 * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main
542 * render CL when they get called to) and actually triggers the queued
543 * semaphore increment.
544 */
545 if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
546 DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
547 "VC4_PACKET_FLUSH\n");
548 return -EINVAL;
549 }
550
551 return 0;
552}
553
554static bool
555reloc_tex(struct vc4_exec_info *exec,
556 void *uniform_data_u,
557 struct vc4_texture_sample_info *sample,
558 uint32_t texture_handle_index)
559
560{
561 struct drm_gem_cma_object *tex;
562 uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
563 uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
564 uint32_t p2 = (sample->p_offset[2] != ~0 ?
565 *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
566 uint32_t p3 = (sample->p_offset[3] != ~0 ?
567 *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
568 uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
569 uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
570 uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
571 uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
572 uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
573 uint32_t cpp, tiling_format, utile_w, utile_h;
574 uint32_t i;
575 uint32_t cube_map_stride = 0;
576 enum vc4_texture_data_type type;
577
578 tex = vc4_use_bo(exec, texture_handle_index);
579 if (!tex)
580 return false;
581
582 if (sample->is_direct) {
583 uint32_t remaining_size = tex->base.size - p0;
584
585 if (p0 > tex->base.size - 4) {
586 DRM_ERROR("UBO offset greater than UBO size\n");
587 goto fail;
588 }
589 if (p1 > remaining_size - 4) {
590 DRM_ERROR("UBO clamp would allow reads "
591 "outside of UBO\n");
592 goto fail;
593 }
594 *validated_p0 = tex->paddr + p0;
595 return true;
596 }
597
598 if (width == 0)
599 width = 2048;
600 if (height == 0)
601 height = 2048;
602
603 if (p0 & VC4_TEX_P0_CMMODE_MASK) {
604 if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
605 VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
606 cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
607 if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
608 VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
609 if (cube_map_stride) {
610 DRM_ERROR("Cube map stride set twice\n");
611 goto fail;
612 }
613
614 cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
615 }
616 if (!cube_map_stride) {
617 DRM_ERROR("Cube map stride not set\n");
618 goto fail;
619 }
620 }
621
622 type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
623 (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
624
625 switch (type) {
626 case VC4_TEXTURE_TYPE_RGBA8888:
627 case VC4_TEXTURE_TYPE_RGBX8888:
628 case VC4_TEXTURE_TYPE_RGBA32R:
629 cpp = 4;
630 break;
631 case VC4_TEXTURE_TYPE_RGBA4444:
632 case VC4_TEXTURE_TYPE_RGBA5551:
633 case VC4_TEXTURE_TYPE_RGB565:
634 case VC4_TEXTURE_TYPE_LUMALPHA:
635 case VC4_TEXTURE_TYPE_S16F:
636 case VC4_TEXTURE_TYPE_S16:
637 cpp = 2;
638 break;
639 case VC4_TEXTURE_TYPE_LUMINANCE:
640 case VC4_TEXTURE_TYPE_ALPHA:
641 case VC4_TEXTURE_TYPE_S8:
642 cpp = 1;
643 break;
644 case VC4_TEXTURE_TYPE_ETC1:
645 case VC4_TEXTURE_TYPE_BW1:
646 case VC4_TEXTURE_TYPE_A4:
647 case VC4_TEXTURE_TYPE_A1:
648 case VC4_TEXTURE_TYPE_RGBA64:
649 case VC4_TEXTURE_TYPE_YUV422R:
650 default:
651 DRM_ERROR("Texture format %d unsupported\n", type);
652 goto fail;
653 }
654 utile_w = utile_width(cpp);
655 utile_h = utile_height(cpp);
656
657 if (type == VC4_TEXTURE_TYPE_RGBA32R) {
658 tiling_format = VC4_TILING_FORMAT_LINEAR;
659 } else {
660 if (size_is_lt(width, height, cpp))
661 tiling_format = VC4_TILING_FORMAT_LT;
662 else
663 tiling_format = VC4_TILING_FORMAT_T;
664 }
665
666 if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
667 tiling_format, width, height, cpp)) {
668 goto fail;
669 }
670
671 /* The mipmap levels are stored before the base of the texture. Make
672 * sure there is actually space in the BO.
673 */
674 for (i = 1; i <= miplevels; i++) {
675 uint32_t level_width = max(width >> i, 1u);
676 uint32_t level_height = max(height >> i, 1u);
677 uint32_t aligned_width, aligned_height;
678 uint32_t level_size;
679
680 /* Once the levels get small enough, they drop from T to LT. */
681 if (tiling_format == VC4_TILING_FORMAT_T &&
682 size_is_lt(level_width, level_height, cpp)) {
683 tiling_format = VC4_TILING_FORMAT_LT;
684 }
685
686 switch (tiling_format) {
687 case VC4_TILING_FORMAT_T:
688 aligned_width = round_up(level_width, utile_w * 8);
689 aligned_height = round_up(level_height, utile_h * 8);
690 break;
691 case VC4_TILING_FORMAT_LT:
692 aligned_width = round_up(level_width, utile_w);
693 aligned_height = round_up(level_height, utile_h);
694 break;
695 default:
696 aligned_width = round_up(level_width, utile_w);
697 aligned_height = level_height;
698 break;
699 }
700
701 level_size = aligned_width * cpp * aligned_height;
702
703 if (offset < level_size) {
704 DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
705 "overflowed buffer bounds (offset %d)\n",
706 i, level_width, level_height,
707 aligned_width, aligned_height,
708 level_size, offset);
709 goto fail;
710 }
711
712 offset -= level_size;
713 }
714
715 *validated_p0 = tex->paddr + p0;
716
717 return true;
718 fail:
719 DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
720 DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
721 DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
722 DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
723 return false;
724}
725
726static int
727validate_gl_shader_rec(struct drm_device *dev,
728 struct vc4_exec_info *exec,
729 struct vc4_shader_state *state)
730{
731 uint32_t *src_handles;
732 void *pkt_u, *pkt_v;
733 static const uint32_t shader_reloc_offsets[] = {
734 4, /* fs */
735 16, /* vs */
736 28, /* cs */
737 };
738 uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
739 struct drm_gem_cma_object *bo[shader_reloc_count + 8];
740 uint32_t nr_attributes, nr_relocs, packet_size;
741 int i;
742
743 nr_attributes = state->addr & 0x7;
744 if (nr_attributes == 0)
745 nr_attributes = 8;
746 packet_size = gl_shader_rec_size(state->addr);
747
748 nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
749 if (nr_relocs * 4 > exec->shader_rec_size) {
750 DRM_ERROR("overflowed shader recs reading %d handles "
751 "from %d bytes left\n",
752 nr_relocs, exec->shader_rec_size);
753 return -EINVAL;
754 }
755 src_handles = exec->shader_rec_u;
756 exec->shader_rec_u += nr_relocs * 4;
757 exec->shader_rec_size -= nr_relocs * 4;
758
759 if (packet_size > exec->shader_rec_size) {
760 DRM_ERROR("overflowed shader recs copying %db packet "
761 "from %d bytes left\n",
762 packet_size, exec->shader_rec_size);
763 return -EINVAL;
764 }
765 pkt_u = exec->shader_rec_u;
766 pkt_v = exec->shader_rec_v;
767 memcpy(pkt_v, pkt_u, packet_size);
768 exec->shader_rec_u += packet_size;
769 /* Shader recs have to be aligned to 16 bytes (due to the attribute
770 * flags being in the low bytes), so round the next validated shader
771 * rec address up. This should be safe, since we've got so many
772 * relocations in a shader rec packet.
773 */
774 BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
775 exec->shader_rec_v += roundup(packet_size, 16);
776 exec->shader_rec_size -= packet_size;
777
778 if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
779 DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
780 return -EINVAL;
781 }
782
783 for (i = 0; i < shader_reloc_count; i++) {
784 if (src_handles[i] > exec->bo_count) {
785 DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
786 return -EINVAL;
787 }
788
789 bo[i] = exec->bo[src_handles[i]];
790 if (!bo[i])
791 return -EINVAL;
792 }
793 for (i = shader_reloc_count; i < nr_relocs; i++) {
794 bo[i] = vc4_use_bo(exec, src_handles[i]);
795 if (!bo[i])
796 return -EINVAL;
797 }
798
799 for (i = 0; i < shader_reloc_count; i++) {
800 struct vc4_validated_shader_info *validated_shader;
801 uint32_t o = shader_reloc_offsets[i];
802 uint32_t src_offset = *(uint32_t *)(pkt_u + o);
803 uint32_t *texture_handles_u;
804 void *uniform_data_u;
805 uint32_t tex;
806
807 *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
808
809 if (src_offset != 0) {
810 DRM_ERROR("Shaders must be at offset 0 of "
811 "the BO.\n");
812 return -EINVAL;
813 }
814
815 validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
816 if (!validated_shader)
817 return -EINVAL;
818
819 if (validated_shader->uniforms_src_size >
820 exec->uniforms_size) {
821 DRM_ERROR("Uniforms src buffer overflow\n");
822 return -EINVAL;
823 }
824
825 texture_handles_u = exec->uniforms_u;
826 uniform_data_u = (texture_handles_u +
827 validated_shader->num_texture_samples);
828
829 memcpy(exec->uniforms_v, uniform_data_u,
830 validated_shader->uniforms_size);
831
832 for (tex = 0;
833 tex < validated_shader->num_texture_samples;
834 tex++) {
835 if (!reloc_tex(exec,
836 uniform_data_u,
837 &validated_shader->texture_samples[tex],
838 texture_handles_u[tex])) {
839 return -EINVAL;
840 }
841 }
842
843 *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
844
845 exec->uniforms_u += validated_shader->uniforms_src_size;
846 exec->uniforms_v += validated_shader->uniforms_size;
847 exec->uniforms_p += validated_shader->uniforms_size;
848 }
849
850 for (i = 0; i < nr_attributes; i++) {
851 struct drm_gem_cma_object *vbo =
852 bo[ARRAY_SIZE(shader_reloc_offsets) + i];
853 uint32_t o = 36 + i * 8;
854 uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
855 uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
856 uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
857 uint32_t max_index;
858
859 if (state->addr & 0x8)
860 stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
861
862 if (vbo->base.size < offset ||
863 vbo->base.size - offset < attr_size) {
864 DRM_ERROR("BO offset overflow (%d + %d > %d)\n",
865 offset, attr_size, vbo->base.size);
866 return -EINVAL;
867 }
868
869 if (stride != 0) {
870 max_index = ((vbo->base.size - offset - attr_size) /
871 stride);
872 if (state->max_index > max_index) {
873 DRM_ERROR("primitives use index %d out of "
874 "supplied %d\n",
875 state->max_index, max_index);
876 return -EINVAL;
877 }
878 }
879
880 *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
881 }
882
883 return 0;
884}
885
886int
887vc4_validate_shader_recs(struct drm_device *dev,
888 struct vc4_exec_info *exec)
889{
890 uint32_t i;
891 int ret = 0;
892
893 for (i = 0; i < exec->shader_state_count; i++) {
894 ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
895 if (ret)
896 return ret;
897 }
898
899 return ret;
900}
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
new file mode 100644
index 000000000000..f67124b4c534
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -0,0 +1,513 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/**
25 * DOC: Shader validator for VC4.
26 *
27 * The VC4 has no IOMMU between it and system memory, so a user with
28 * access to execute shaders could escalate privilege by overwriting
29 * system memory (using the VPM write address register in the
30 * general-purpose DMA mode) or reading system memory it shouldn't
31 * (reading it as a texture, or uniform data, or vertex data).
32 *
33 * This walks over a shader BO, ensuring that its accesses are
34 * appropriately bounded, and recording how many texture accesses are
35 * made and where so that we can do relocations for them in the
36 * uniform stream.
37 */
38
39#include "vc4_drv.h"
40#include "vc4_qpu_defines.h"
41
struct vc4_shader_validation_state {
	/* Pending texture-unit parameter writes, accumulated per TMU
	 * until the S-coordinate write dispatches the sample (see
	 * check_tmu_write()/record_texture_sample()).
	 */
	struct vc4_texture_sample_info tmu_setup[2];
	/* Number of parameter writes seen so far for each TMU since the
	 * last dispatch; capped at 4.
	 */
	int tmu_write_count[2];

	/* For registers that were last written to by a MIN instruction with
	 * one argument being a uniform, the address of the uniform.
	 * Otherwise, ~0.
	 *
	 * This is used for the validation of direct address memory reads.
	 */
	uint32_t live_min_clamp_offsets[32 + 32 + 4];
	/* True for registers last written by a MAX(x, 0) — i.e. clamped
	 * to a minimum of 0.  Index space matches
	 * live_min_clamp_offsets: regfile A (0-31), regfile B (32-63),
	 * accumulators r0-r3 (64-67).
	 */
	bool live_max_clamp_regs[32 + 32 + 4];
};
55
56static uint32_t
57waddr_to_live_reg_index(uint32_t waddr, bool is_b)
58{
59 if (waddr < 32) {
60 if (is_b)
61 return 32 + waddr;
62 else
63 return waddr;
64 } else if (waddr <= QPU_W_ACC3) {
65 return 64 + waddr - QPU_W_ACC0;
66 } else {
67 return ~0;
68 }
69}
70
71static uint32_t
72raddr_add_a_to_live_reg_index(uint64_t inst)
73{
74 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
75 uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
76 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
77 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
78
79 if (add_a == QPU_MUX_A)
80 return raddr_a;
81 else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM)
82 return 32 + raddr_b;
83 else if (add_a <= QPU_MUX_R3)
84 return 64 + add_a;
85 else
86 return ~0;
87}
88
89static bool
90is_tmu_submit(uint32_t waddr)
91{
92 return (waddr == QPU_W_TMU0_S ||
93 waddr == QPU_W_TMU1_S);
94}
95
96static bool
97is_tmu_write(uint32_t waddr)
98{
99 return (waddr >= QPU_W_TMU0_S &&
100 waddr <= QPU_W_TMU1_B);
101}
102
103static bool
104record_texture_sample(struct vc4_validated_shader_info *validated_shader,
105 struct vc4_shader_validation_state *validation_state,
106 int tmu)
107{
108 uint32_t s = validated_shader->num_texture_samples;
109 int i;
110 struct vc4_texture_sample_info *temp_samples;
111
112 temp_samples = krealloc(validated_shader->texture_samples,
113 (s + 1) * sizeof(*temp_samples),
114 GFP_KERNEL);
115 if (!temp_samples)
116 return false;
117
118 memcpy(&temp_samples[s],
119 &validation_state->tmu_setup[tmu],
120 sizeof(*temp_samples));
121
122 validated_shader->num_texture_samples = s + 1;
123 validated_shader->texture_samples = temp_samples;
124
125 for (i = 0; i < 4; i++)
126 validation_state->tmu_setup[tmu].p_offset[i] = ~0;
127
128 return true;
129}
130
/* Validates a write to one of the TMU (texture unit) registers.
 *
 * Indirect texture setup passes parameters through uniforms, which the
 * kernel relocates later.  A "direct" access (first write to a TMU
 * being the dispatching S-coordinate write) reads arbitrary memory, so
 * it must provably be UBO base (a uniform) plus an offset clamped by a
 * previous MIN-with-uniform (tracked in live_min_clamp_offsets).
 *
 * Returns false (rejecting the shader) on any violation.
 */
static bool
check_tmu_write(uint64_t inst,
		struct vc4_validated_shader_info *validated_shader,
		struct vc4_shader_validation_state *validation_state,
		bool is_mul)
{
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	/* Addresses above TMU0's range belong to TMU1. */
	int tmu = waddr > QPU_W_TMU0_B;
	bool submit = is_tmu_submit(waddr);
	/* An S write with no prior parameter writes is a direct
	 * (unrelocated) memory access.
	 */
	bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

	if (is_direct) {
		uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
		uint32_t clamp_reg, clamp_offset;

		if (sig == QPU_SIG_SMALL_IMM) {
			DRM_ERROR("direct TMU read used small immediate\n");
			return false;
		}

		/* Make sure that this texture load is an add of the base
		 * address of the UBO to a clamped offset within the UBO.
		 */
		if (is_mul ||
		    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
			DRM_ERROR("direct TMU load wasn't an add\n");
			return false;
		}

		/* We assert that the clamped address is the first
		 * argument, and the UBO base address is the second argument.
		 * This is arbitrary, but simpler than supporting flipping the
		 * two either way.
		 */
		clamp_reg = raddr_add_a_to_live_reg_index(inst);
		if (clamp_reg == ~0) {
			DRM_ERROR("direct TMU load wasn't clamped\n");
			return false;
		}

		clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
		if (clamp_offset == ~0) {
			DRM_ERROR("direct TMU load wasn't clamped\n");
			return false;
		}

		/* Store the clamp value's offset in p1 (see reloc_tex() in
		 * vc4_validate.c).
		 */
		validation_state->tmu_setup[tmu].p_offset[1] =
			clamp_offset;

		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
			DRM_ERROR("direct TMU load didn't add to a uniform\n");
			return false;
		}

		validation_state->tmu_setup[tmu].is_direct = true;
	} else {
		/* Indirect setup must not consume a uniform in the same
		 * instruction, since the kernel inserts relocated uniforms
		 * between the recorded parameter writes.
		 */
		if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
					      raddr_b == QPU_R_UNIF)) {
			DRM_ERROR("uniform read in the same instruction as "
				  "texture setup.\n");
			return false;
		}
	}

	/* Each TMU takes at most 4 parameter writes (S/T/R/B) per sample. */
	if (validation_state->tmu_write_count[tmu] >= 4) {
		DRM_ERROR("TMU%d got too many parameters before dispatch\n",
			  tmu);
		return false;
	}
	validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
		validated_shader->uniforms_size;
	validation_state->tmu_write_count[tmu]++;
	/* Since direct uses a RADDR uniform reference, it will get counted in
	 * check_instruction_reads()
	 */
	if (!is_direct)
		validated_shader->uniforms_size += 4;

	if (submit) {
		if (!record_texture_sample(validated_shader,
					   validation_state, tmu)) {
			return false;
		}

		validation_state->tmu_write_count[tmu] = 0;
	}

	return true;
}
229
/* Validates one of the instruction's two write ports (ADD or MUL,
 * selected by @is_mul) against the whitelist of safe write addresses.
 *
 * Addresses not handled by an explicit case fall through to the final
 * "return true" — they are plain register writes or other targets that
 * can't touch system memory.  Returns false to reject the shader.
 */
static bool
check_reg_write(uint64_t inst,
		struct vc4_validated_shader_info *validated_shader,
		struct vc4_shader_validation_state *validation_state,
		bool is_mul)
{
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));

	switch (waddr) {
	case QPU_W_UNIFORMS_ADDRESS:
		/* XXX: We'll probably need to support this for reladdr, but
		 * it's definitely a security-related one.
		 */
		DRM_ERROR("uniforms address load unsupported\n");
		return false;

	case QPU_W_TLB_COLOR_MS:
	case QPU_W_TLB_COLOR_ALL:
	case QPU_W_TLB_Z:
		/* These only interact with the tile buffer, not main memory,
		 * so they're safe.
		 */
		return true;

	case QPU_W_TMU0_S:
	case QPU_W_TMU0_T:
	case QPU_W_TMU0_R:
	case QPU_W_TMU0_B:
	case QPU_W_TMU1_S:
	case QPU_W_TMU1_T:
	case QPU_W_TMU1_R:
	case QPU_W_TMU1_B:
		/* Texture unit writes need the full clamp/uniform audit. */
		return check_tmu_write(inst, validated_shader, validation_state,
				       is_mul);

	case QPU_W_HOST_INT:
	case QPU_W_TMU_NOSWAP:
	case QPU_W_TLB_ALPHA_MASK:
	case QPU_W_MUTEX_RELEASE:
		/* XXX: I haven't thought about these, so don't support them
		 * for now.
		 */
		DRM_ERROR("Unsupported waddr %d\n", waddr);
		return false;

	case QPU_W_VPM_ADDR:
		/* This is the register that triggers general-purpose DMA,
		 * which can read/write arbitrary system memory.
		 */
		DRM_ERROR("General VPM DMA unsupported\n");
		return false;

	case QPU_W_VPM:
	case QPU_W_VPMVCD_SETUP:
		/* We allow VPM setup in general, even including VPM DMA
		 * configuration setup, because the (unsafe) DMA can only be
		 * triggered by QPU_W_VPM_ADDR writes.
		 */
		return true;

	case QPU_W_TLB_STENCIL_SETUP:
		return true;
	}

	return true;
}
295
/* Updates the live clamp-tracking state for one instruction.
 *
 * A direct TMU read is only allowed on an address that was provably
 * clamped, i.e. produced by MIN(MAX(x, 0), uniform).  This function
 * records which registers currently hold a MAX(x, 0) result
 * (live_max_clamp_regs) and which hold the full MIN-of-uniform clamp
 * (live_min_clamp_offsets, storing the clamping uniform's offset),
 * clearing state for any register the instruction overwrites.
 */
static void
track_live_clamps(uint64_t inst,
		  struct vc4_validated_shader_info *validated_shader,
		  struct vc4_shader_validation_state *validation_state)
{
	uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
	uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
	uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
	uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
	/* WS swaps which regfile (A or B) each pipeline writes to. */
	bool ws = inst & QPU_WS;
	uint32_t lri_add_a, lri_add, lri_mul;
	bool add_a_is_min_0;

	/* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
	 * before we clear previous live state.
	 */
	lri_add_a = raddr_add_a_to_live_reg_index(inst);
	add_a_is_min_0 = (lri_add_a != ~0 &&
			  validation_state->live_max_clamp_regs[lri_add_a]);

	/* Clear live state for registers written by our instruction. */
	lri_add = waddr_to_live_reg_index(waddr_add, ws);
	lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
	if (lri_mul != ~0) {
		validation_state->live_max_clamp_regs[lri_mul] = false;
		validation_state->live_min_clamp_offsets[lri_mul] = ~0;
	}
	if (lri_add != ~0) {
		validation_state->live_max_clamp_regs[lri_add] = false;
		validation_state->live_min_clamp_offsets[lri_add] = ~0;
	} else {
		/* Nothing further to do for live tracking, since only ADDs
		 * generate new live clamp registers.
		 */
		return;
	}

	/* Now, handle remaining live clamp tracking for the ADD operation. */

	/* A conditional write might not actually happen, so we can't treat
	 * its destination as clamped.
	 */
	if (cond_add != QPU_COND_ALWAYS)
		return;

	if (op_add == QPU_A_MAX) {
		/* Track live clamps of a value to a minimum of 0 (in either
		 * arg).
		 */
		if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
		    (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
			return;
		}

		validation_state->live_max_clamp_regs[lri_add] = true;
	} else if (op_add == QPU_A_MIN) {
		/* Track live clamps of a value clamped to a minimum of 0 and
		 * a maximum of some uniform's offset.
		 */
		if (!add_a_is_min_0)
			return;

		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
		      sig != QPU_SIG_SMALL_IMM)) {
			return;
		}

		validation_state->live_min_clamp_offsets[lri_add] =
			validated_shader->uniforms_size;
	}
}
370
371static bool
372check_instruction_writes(uint64_t inst,
373 struct vc4_validated_shader_info *validated_shader,
374 struct vc4_shader_validation_state *validation_state)
375{
376 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
377 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
378 bool ok;
379
380 if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
381 DRM_ERROR("ADD and MUL both set up textures\n");
382 return false;
383 }
384
385 ok = (check_reg_write(inst, validated_shader, validation_state,
386 false) &&
387 check_reg_write(inst, validated_shader, validation_state,
388 true));
389
390 track_live_clamps(inst, validated_shader, validation_state);
391
392 return ok;
393}
394
395static bool
396check_instruction_reads(uint64_t inst,
397 struct vc4_validated_shader_info *validated_shader)
398{
399 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
400 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
401 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
402
403 if (raddr_a == QPU_R_UNIF ||
404 (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
405 /* This can't overflow the uint32_t, because we're reading 8
406 * bytes of instruction to increment by 4 here, so we'd
407 * already be OOM.
408 */
409 validated_shader->uniforms_size += 4;
410 }
411
412 return true;
413}
414
415struct vc4_validated_shader_info *
416vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
417{
418 bool found_shader_end = false;
419 int shader_end_ip = 0;
420 uint32_t ip, max_ip;
421 uint64_t *shader;
422 struct vc4_validated_shader_info *validated_shader;
423 struct vc4_shader_validation_state validation_state;
424 int i;
425
426 memset(&validation_state, 0, sizeof(validation_state));
427
428 for (i = 0; i < 8; i++)
429 validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
430 for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
431 validation_state.live_min_clamp_offsets[i] = ~0;
432
433 shader = shader_obj->vaddr;
434 max_ip = shader_obj->base.size / sizeof(uint64_t);
435
436 validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
437 if (!validated_shader)
438 return NULL;
439
440 for (ip = 0; ip < max_ip; ip++) {
441 uint64_t inst = shader[ip];
442 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
443
444 switch (sig) {
445 case QPU_SIG_NONE:
446 case QPU_SIG_WAIT_FOR_SCOREBOARD:
447 case QPU_SIG_SCOREBOARD_UNLOCK:
448 case QPU_SIG_COLOR_LOAD:
449 case QPU_SIG_LOAD_TMU0:
450 case QPU_SIG_LOAD_TMU1:
451 case QPU_SIG_PROG_END:
452 case QPU_SIG_SMALL_IMM:
453 if (!check_instruction_writes(inst, validated_shader,
454 &validation_state)) {
455 DRM_ERROR("Bad write at ip %d\n", ip);
456 goto fail;
457 }
458
459 if (!check_instruction_reads(inst, validated_shader))
460 goto fail;
461
462 if (sig == QPU_SIG_PROG_END) {
463 found_shader_end = true;
464 shader_end_ip = ip;
465 }
466
467 break;
468
469 case QPU_SIG_LOAD_IMM:
470 if (!check_instruction_writes(inst, validated_shader,
471 &validation_state)) {
472 DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
473 goto fail;
474 }
475 break;
476
477 default:
478 DRM_ERROR("Unsupported QPU signal %d at "
479 "instruction %d\n", sig, ip);
480 goto fail;
481 }
482
483 /* There are two delay slots after program end is signaled
484 * that are still executed, then we're finished.
485 */
486 if (found_shader_end && ip == shader_end_ip + 2)
487 break;
488 }
489
490 if (ip == max_ip) {
491 DRM_ERROR("shader failed to terminate before "
492 "shader BO end at %zd\n",
493 shader_obj->base.size);
494 goto fail;
495 }
496
497 /* Again, no chance of integer overflow here because the worst case
498 * scenario is 8 bytes of uniforms plus handles per 8-byte
499 * instruction.
500 */
501 validated_shader->uniforms_src_size =
502 (validated_shader->uniforms_size +
503 4 * validated_shader->num_texture_samples);
504
505 return validated_shader;
506
507fail:
508 if (validated_shader) {
509 kfree(validated_shader->texture_samples);
510 kfree(validated_shader);
511 }
512 return NULL;
513}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 8e6044d7660a..a165f03eaa79 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -374,16 +374,6 @@ static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
374 .best_encoder = virtio_gpu_best_encoder, 374 .best_encoder = virtio_gpu_best_encoder,
375}; 375};
376 376
377static void virtio_gpu_conn_save(struct drm_connector *connector)
378{
379 DRM_DEBUG("\n");
380}
381
382static void virtio_gpu_conn_restore(struct drm_connector *connector)
383{
384 DRM_DEBUG("\n");
385}
386
387static enum drm_connector_status virtio_gpu_conn_detect( 377static enum drm_connector_status virtio_gpu_conn_detect(
388 struct drm_connector *connector, 378 struct drm_connector *connector,
389 bool force) 379 bool force)
@@ -409,10 +399,8 @@ static void virtio_gpu_conn_destroy(struct drm_connector *connector)
409 399
410static const struct drm_connector_funcs virtio_gpu_connector_funcs = { 400static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
411 .dpms = drm_atomic_helper_connector_dpms, 401 .dpms = drm_atomic_helper_connector_dpms,
412 .save = virtio_gpu_conn_save,
413 .restore = virtio_gpu_conn_restore,
414 .detect = virtio_gpu_conn_detect, 402 .detect = virtio_gpu_conn_detect,
415 .fill_modes = drm_helper_probe_single_connector_modes_nomerge, 403 .fill_modes = drm_helper_probe_single_connector_modes,
416 .destroy = virtio_gpu_conn_destroy, 404 .destroy = virtio_gpu_conn_destroy,
417 .reset = drm_atomic_helper_connector_reset, 405 .reset = drm_atomic_helper_connector_reset,
418 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 406 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -443,7 +431,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
443 if (IS_ERR(plane)) 431 if (IS_ERR(plane))
444 return PTR_ERR(plane); 432 return PTR_ERR(plane);
445 drm_crtc_init_with_planes(dev, crtc, plane, NULL, 433 drm_crtc_init_with_planes(dev, crtc, plane, NULL,
446 &virtio_gpu_crtc_funcs); 434 &virtio_gpu_crtc_funcs, NULL);
447 drm_mode_crtc_set_gamma_size(crtc, 256); 435 drm_mode_crtc_set_gamma_size(crtc, 256);
448 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); 436 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
449 plane->crtc = crtc; 437 plane->crtc = crtc;
@@ -453,7 +441,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
453 drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs); 441 drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
454 442
455 drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs, 443 drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
456 DRM_MODE_ENCODER_VIRTUAL); 444 DRM_MODE_ENCODER_VIRTUAL, NULL);
457 drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs); 445 drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
458 encoder->possible_crtcs = 1 << index; 446 encoder->possible_crtcs = 1 << index;
459 447
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4a74129c5708..572fb351feab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -107,7 +107,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
107 &virtio_gpu_plane_funcs, 107 &virtio_gpu_plane_funcs,
108 virtio_gpu_formats, 108 virtio_gpu_formats,
109 ARRAY_SIZE(virtio_gpu_formats), 109 ARRAY_SIZE(virtio_gpu_formats),
110 DRM_PLANE_TYPE_PRIMARY); 110 DRM_PLANE_TYPE_PRIMARY, NULL);
111 if (ret) 111 if (ret)
112 goto err_plane_init; 112 goto err_plane_init;
113 113
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 6377e8151000..67cebb23c940 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
247{ 247{
248 struct vmw_cmdbuf_man *man = header->man; 248 struct vmw_cmdbuf_man *man = header->man;
249 249
250 BUG_ON(!spin_is_locked(&man->lock)); 250 lockdep_assert_held_once(&man->lock);
251 251
252 if (header->inline_space) { 252 if (header->inline_space) {
253 vmw_cmdbuf_header_inline_free(header); 253 vmw_cmdbuf_header_inline_free(header);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a09cf8529b9f..c49812b80dd0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1233,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev,
1233 1233
1234 vmw_fp->locked_master = drm_master_get(file_priv->master); 1234 vmw_fp->locked_master = drm_master_get(file_priv->master);
1235 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 1235 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
1236 vmw_kms_legacy_hotspot_clear(dev_priv);
1236 if (unlikely((ret != 0))) { 1237 if (unlikely((ret != 0))) {
1237 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 1238 DRM_ERROR("Unable to lock TTM at VT switch.\n");
1238 drm_master_put(&vmw_fp->locked_master); 1239 drm_master_put(&vmw_fp->locked_master);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a8ae9dfb83b7..469cdd520615 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -925,6 +925,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
925 uint32_t num_clips); 925 uint32_t num_clips);
926int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 926int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
927 struct drm_file *file_priv); 927 struct drm_file *file_priv);
928void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
928 929
929int vmw_dumb_create(struct drm_file *file_priv, 930int vmw_dumb_create(struct drm_file *file_priv,
930 struct drm_device *dev, 931 struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a8baf5f5e765..b6a0806b06bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -390,7 +390,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
390 else if (ctx_id == SVGA3D_INVALID_ID) 390 else if (ctx_id == SVGA3D_INVALID_ID)
391 ret = vmw_local_fifo_reserve(dev_priv, bytes); 391 ret = vmw_local_fifo_reserve(dev_priv, bytes);
392 else { 392 else {
393 WARN_ON("Command buffer has not been allocated.\n"); 393 WARN(1, "Command buffer has not been allocated.\n");
394 ret = NULL; 394 ret = NULL;
395 } 395 }
396 if (IS_ERR_OR_NULL(ret)) { 396 if (IS_ERR_OR_NULL(ret)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e38db35132ed..b221a8c40282 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -133,13 +133,19 @@ void vmw_cursor_update_position(struct vmw_private *dev_priv,
133 vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); 133 vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
134} 134}
135 135
136int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 136
137 uint32_t handle, uint32_t width, uint32_t height) 137/*
138 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
139 */
140int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
141 uint32_t handle, uint32_t width, uint32_t height,
142 int32_t hot_x, int32_t hot_y)
138{ 143{
139 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 144 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
140 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 145 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
141 struct vmw_surface *surface = NULL; 146 struct vmw_surface *surface = NULL;
142 struct vmw_dma_buffer *dmabuf = NULL; 147 struct vmw_dma_buffer *dmabuf = NULL;
148 s32 hotspot_x, hotspot_y;
143 int ret; 149 int ret;
144 150
145 /* 151 /*
@@ -151,6 +157,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
151 */ 157 */
152 drm_modeset_unlock_crtc(crtc); 158 drm_modeset_unlock_crtc(crtc);
153 drm_modeset_lock_all(dev_priv->dev); 159 drm_modeset_lock_all(dev_priv->dev);
160 hotspot_x = hot_x + du->hotspot_x;
161 hotspot_y = hot_y + du->hotspot_y;
154 162
155 /* A lot of the code assumes this */ 163 /* A lot of the code assumes this */
156 if (handle && (width != 64 || height != 64)) { 164 if (handle && (width != 64 || height != 64)) {
@@ -187,31 +195,34 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
187 vmw_dmabuf_unreference(&du->cursor_dmabuf); 195 vmw_dmabuf_unreference(&du->cursor_dmabuf);
188 196
189 /* setup new image */ 197 /* setup new image */
198 ret = 0;
190 if (surface) { 199 if (surface) {
191 /* vmw_user_surface_lookup takes one reference */ 200 /* vmw_user_surface_lookup takes one reference */
192 du->cursor_surface = surface; 201 du->cursor_surface = surface;
193 202
194 du->cursor_surface->snooper.crtc = crtc; 203 du->cursor_surface->snooper.crtc = crtc;
195 du->cursor_age = du->cursor_surface->snooper.age; 204 du->cursor_age = du->cursor_surface->snooper.age;
196 vmw_cursor_update_image(dev_priv, surface->snooper.image, 205 ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
197 64, 64, du->hotspot_x, du->hotspot_y); 206 64, 64, hotspot_x, hotspot_y);
198 } else if (dmabuf) { 207 } else if (dmabuf) {
199 /* vmw_user_surface_lookup takes one reference */ 208 /* vmw_user_surface_lookup takes one reference */
200 du->cursor_dmabuf = dmabuf; 209 du->cursor_dmabuf = dmabuf;
201 210
202 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, 211 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
203 du->hotspot_x, du->hotspot_y); 212 hotspot_x, hotspot_y);
204 } else { 213 } else {
205 vmw_cursor_update_position(dev_priv, false, 0, 0); 214 vmw_cursor_update_position(dev_priv, false, 0, 0);
206 ret = 0;
207 goto out; 215 goto out;
208 } 216 }
209 217
210 vmw_cursor_update_position(dev_priv, true, 218 if (!ret) {
211 du->cursor_x + du->hotspot_x, 219 vmw_cursor_update_position(dev_priv, true,
212 du->cursor_y + du->hotspot_y); 220 du->cursor_x + hotspot_x,
221 du->cursor_y + hotspot_y);
222 du->core_hotspot_x = hot_x;
223 du->core_hotspot_y = hot_y;
224 }
213 225
214 ret = 0;
215out: 226out:
216 drm_modeset_unlock_all(dev_priv->dev); 227 drm_modeset_unlock_all(dev_priv->dev);
217 drm_modeset_lock_crtc(crtc, crtc->cursor); 228 drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -239,8 +250,10 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
239 drm_modeset_lock_all(dev_priv->dev); 250 drm_modeset_lock_all(dev_priv->dev);
240 251
241 vmw_cursor_update_position(dev_priv, shown, 252 vmw_cursor_update_position(dev_priv, shown,
242 du->cursor_x + du->hotspot_x, 253 du->cursor_x + du->hotspot_x +
243 du->cursor_y + du->hotspot_y); 254 du->core_hotspot_x,
255 du->cursor_y + du->hotspot_y +
256 du->core_hotspot_y);
244 257
245 drm_modeset_unlock_all(dev_priv->dev); 258 drm_modeset_unlock_all(dev_priv->dev);
246 drm_modeset_lock_crtc(crtc, crtc->cursor); 259 drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -334,6 +347,29 @@ err_unreserve:
334 ttm_bo_unreserve(bo); 347 ttm_bo_unreserve(bo);
335} 348}
336 349
350/**
351 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
352 *
353 * @dev_priv: Pointer to the device private struct.
354 *
355 * Clears all legacy hotspots.
356 */
357void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
358{
359 struct drm_device *dev = dev_priv->dev;
360 struct vmw_display_unit *du;
361 struct drm_crtc *crtc;
362
363 drm_modeset_lock_all(dev);
364 drm_for_each_crtc(crtc, dev) {
365 du = vmw_crtc_to_du(crtc);
366
367 du->hotspot_x = 0;
368 du->hotspot_y = 0;
369 }
370 drm_modeset_unlock_all(dev);
371}
372
337void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) 373void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
338{ 374{
339 struct drm_device *dev = dev_priv->dev; 375 struct drm_device *dev = dev_priv->dev;
@@ -351,7 +387,9 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
351 du->cursor_age = du->cursor_surface->snooper.age; 387 du->cursor_age = du->cursor_surface->snooper.age;
352 vmw_cursor_update_image(dev_priv, 388 vmw_cursor_update_image(dev_priv,
353 du->cursor_surface->snooper.image, 389 du->cursor_surface->snooper.image,
354 64, 64, du->hotspot_x, du->hotspot_y); 390 64, 64,
391 du->hotspot_x + du->core_hotspot_x,
392 du->hotspot_y + du->core_hotspot_y);
355 } 393 }
356 394
357 mutex_unlock(&dev->mode_config.mutex); 395 mutex_unlock(&dev->mode_config.mutex);
@@ -470,7 +508,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
470} 508}
471 509
472 510
473static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { 511static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
474 .destroy = vmw_framebuffer_surface_destroy, 512 .destroy = vmw_framebuffer_surface_destroy,
475 .dirty = vmw_framebuffer_surface_dirty, 513 .dirty = vmw_framebuffer_surface_dirty,
476}; 514};
@@ -647,7 +685,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
647 return ret; 685 return ret;
648} 686}
649 687
650static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { 688static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
651 .destroy = vmw_framebuffer_dmabuf_destroy, 689 .destroy = vmw_framebuffer_dmabuf_destroy,
652 .dirty = vmw_framebuffer_dmabuf_dirty, 690 .dirty = vmw_framebuffer_dmabuf_dirty,
653}; 691};
@@ -725,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
725 uint32_t format; 763 uint32_t format;
726 struct drm_vmw_size content_base_size; 764 struct drm_vmw_size content_base_size;
727 struct vmw_resource *res; 765 struct vmw_resource *res;
766 unsigned int bytes_pp;
728 int ret; 767 int ret;
729 768
730 switch (mode_cmd->depth) { 769 switch (mode_cmd->depth) {
731 case 32: 770 case 32:
732 case 24: 771 case 24:
733 format = SVGA3D_X8R8G8B8; 772 format = SVGA3D_X8R8G8B8;
773 bytes_pp = 4;
734 break; 774 break;
735 775
736 case 16: 776 case 16:
737 case 15: 777 case 15:
738 format = SVGA3D_R5G6B5; 778 format = SVGA3D_R5G6B5;
779 bytes_pp = 2;
739 break; 780 break;
740 781
741 case 8: 782 case 8:
742 format = SVGA3D_P8; 783 format = SVGA3D_P8;
784 bytes_pp = 1;
743 break; 785 break;
744 786
745 default: 787 default:
@@ -747,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
747 return -EINVAL; 789 return -EINVAL;
748 } 790 }
749 791
750 content_base_size.width = mode_cmd->width; 792 content_base_size.width = mode_cmd->pitch / bytes_pp;
751 content_base_size.height = mode_cmd->height; 793 content_base_size.height = mode_cmd->height;
752 content_base_size.depth = 1; 794 content_base_size.depth = 1;
753 795
@@ -1331,14 +1373,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1331 return 0; 1373 return 0;
1332} 1374}
1333 1375
1334void vmw_du_crtc_save(struct drm_crtc *crtc)
1335{
1336}
1337
1338void vmw_du_crtc_restore(struct drm_crtc *crtc)
1339{
1340}
1341
1342void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 1376void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1343 u16 *r, u16 *g, u16 *b, 1377 u16 *r, u16 *g, u16 *b,
1344 uint32_t start, uint32_t size) 1378 uint32_t start, uint32_t size)
@@ -1360,14 +1394,6 @@ int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
1360 return 0; 1394 return 0;
1361} 1395}
1362 1396
1363void vmw_du_connector_save(struct drm_connector *connector)
1364{
1365}
1366
1367void vmw_du_connector_restore(struct drm_connector *connector)
1368{
1369}
1370
1371enum drm_connector_status 1397enum drm_connector_status
1372vmw_du_connector_detect(struct drm_connector *connector, bool force) 1398vmw_du_connector_detect(struct drm_connector *connector, bool force)
1373{ 1399{
@@ -1554,7 +1580,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1554 drm_mode_probed_add(connector, mode); 1580 drm_mode_probed_add(connector, mode);
1555 } 1581 }
1556 1582
1557 drm_mode_connector_list_update(connector, true); 1583 drm_mode_connector_list_update(connector);
1558 /* Move the prefered mode first, help apps pick the right mode. */ 1584 /* Move the prefered mode first, help apps pick the right mode. */
1559 drm_mode_sort(&connector->modes); 1585 drm_mode_sort(&connector->modes);
1560 1586
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 782df7ca9794..edd81503516d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -159,6 +159,8 @@ struct vmw_display_unit {
159 159
160 int hotspot_x; 160 int hotspot_x;
161 int hotspot_y; 161 int hotspot_y;
162 s32 core_hotspot_x;
163 s32 core_hotspot_y;
162 164
163 unsigned unit; 165 unsigned unit;
164 166
@@ -193,8 +195,9 @@ void vmw_du_crtc_restore(struct drm_crtc *crtc);
193void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 195void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
194 u16 *r, u16 *g, u16 *b, 196 u16 *r, u16 *g, u16 *b,
195 uint32_t start, uint32_t size); 197 uint32_t start, uint32_t size);
196int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 198int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
197 uint32_t handle, uint32_t width, uint32_t height); 199 uint32_t handle, uint32_t width, uint32_t height,
200 int32_t hot_x, int32_t hot_y);
198int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 201int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
199int vmw_du_connector_dpms(struct drm_connector *connector, int mode); 202int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
200void vmw_du_connector_save(struct drm_connector *connector); 203void vmw_du_connector_save(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index bb63e4d795fa..b6fa44fe8929 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -294,10 +294,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
294 return vmw_ldu_commit_list(dev_priv); 294 return vmw_ldu_commit_list(dev_priv);
295} 295}
296 296
297static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { 297static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
298 .save = vmw_du_crtc_save, 298 .cursor_set2 = vmw_du_crtc_cursor_set2,
299 .restore = vmw_du_crtc_restore,
300 .cursor_set = vmw_du_crtc_cursor_set,
301 .cursor_move = vmw_du_crtc_cursor_move, 299 .cursor_move = vmw_du_crtc_cursor_move,
302 .gamma_set = vmw_du_crtc_gamma_set, 300 .gamma_set = vmw_du_crtc_gamma_set,
303 .destroy = vmw_ldu_crtc_destroy, 301 .destroy = vmw_ldu_crtc_destroy,
@@ -314,7 +312,7 @@ static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
314 vmw_ldu_destroy(vmw_encoder_to_ldu(encoder)); 312 vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
315} 313}
316 314
317static struct drm_encoder_funcs vmw_legacy_encoder_funcs = { 315static const struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
318 .destroy = vmw_ldu_encoder_destroy, 316 .destroy = vmw_ldu_encoder_destroy,
319}; 317};
320 318
@@ -327,10 +325,8 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
327 vmw_ldu_destroy(vmw_connector_to_ldu(connector)); 325 vmw_ldu_destroy(vmw_connector_to_ldu(connector));
328} 326}
329 327
330static struct drm_connector_funcs vmw_legacy_connector_funcs = { 328static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
331 .dpms = vmw_du_connector_dpms, 329 .dpms = vmw_du_connector_dpms,
332 .save = vmw_du_connector_save,
333 .restore = vmw_du_connector_restore,
334 .detect = vmw_du_connector_detect, 330 .detect = vmw_du_connector_detect,
335 .fill_modes = vmw_du_connector_fill_modes, 331 .fill_modes = vmw_du_connector_fill_modes,
336 .set_property = vmw_du_connector_set_property, 332 .set_property = vmw_du_connector_set_property,
@@ -367,7 +363,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
367 connector->status = vmw_du_connector_detect(connector, true); 363 connector->status = vmw_du_connector_detect(connector, true);
368 364
369 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 365 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
370 DRM_MODE_ENCODER_VIRTUAL); 366 DRM_MODE_ENCODER_VIRTUAL, NULL);
371 drm_mode_connector_attach_encoder(connector, encoder); 367 drm_mode_connector_attach_encoder(connector, encoder);
372 encoder->possible_crtcs = (1 << unit); 368 encoder->possible_crtcs = (1 << unit);
373 encoder->possible_clones = 0; 369 encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b96d1ab610c5..db082bea8daf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -530,10 +530,8 @@ out_no_fence:
530 return ret; 530 return ret;
531} 531}
532 532
533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { 533static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
534 .save = vmw_du_crtc_save, 534 .cursor_set2 = vmw_du_crtc_cursor_set2,
535 .restore = vmw_du_crtc_restore,
536 .cursor_set = vmw_du_crtc_cursor_set,
537 .cursor_move = vmw_du_crtc_cursor_move, 535 .cursor_move = vmw_du_crtc_cursor_move,
538 .gamma_set = vmw_du_crtc_gamma_set, 536 .gamma_set = vmw_du_crtc_gamma_set,
539 .destroy = vmw_sou_crtc_destroy, 537 .destroy = vmw_sou_crtc_destroy,
@@ -550,7 +548,7 @@ static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
550 vmw_sou_destroy(vmw_encoder_to_sou(encoder)); 548 vmw_sou_destroy(vmw_encoder_to_sou(encoder));
551} 549}
552 550
553static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = { 551static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
554 .destroy = vmw_sou_encoder_destroy, 552 .destroy = vmw_sou_encoder_destroy,
555}; 553};
556 554
@@ -563,12 +561,8 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
563 vmw_sou_destroy(vmw_connector_to_sou(connector)); 561 vmw_sou_destroy(vmw_connector_to_sou(connector));
564} 562}
565 563
566static struct drm_connector_funcs vmw_sou_connector_funcs = { 564static const struct drm_connector_funcs vmw_sou_connector_funcs = {
567 .dpms = vmw_du_connector_dpms, 565 .dpms = vmw_du_connector_dpms,
568 .save = vmw_du_connector_save,
569 .restore = vmw_du_connector_restore,
570 .detect = vmw_du_connector_detect,
571 .fill_modes = vmw_du_connector_fill_modes,
572 .set_property = vmw_du_connector_set_property, 566 .set_property = vmw_du_connector_set_property,
573 .destroy = vmw_sou_connector_destroy, 567 .destroy = vmw_sou_connector_destroy,
574}; 568};
@@ -603,7 +597,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
603 connector->status = vmw_du_connector_detect(connector, true); 597 connector->status = vmw_du_connector_detect(connector, true);
604 598
605 drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, 599 drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
606 DRM_MODE_ENCODER_VIRTUAL); 600 DRM_MODE_ENCODER_VIRTUAL, NULL);
607 drm_mode_connector_attach_encoder(connector, encoder); 601 drm_mode_connector_attach_encoder(connector, encoder);
608 encoder->possible_crtcs = (1 << unit); 602 encoder->possible_crtcs = (1 << unit);
609 encoder->possible_clones = 0; 603 encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b1fc1c02792d..4ef5ffd7189d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1040,10 +1040,8 @@ out_finish:
1040/* 1040/*
1041 * Screen Target CRTC dispatch table 1041 * Screen Target CRTC dispatch table
1042 */ 1042 */
1043static struct drm_crtc_funcs vmw_stdu_crtc_funcs = { 1043static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
1044 .save = vmw_du_crtc_save, 1044 .cursor_set2 = vmw_du_crtc_cursor_set2,
1045 .restore = vmw_du_crtc_restore,
1046 .cursor_set = vmw_du_crtc_cursor_set,
1047 .cursor_move = vmw_du_crtc_cursor_move, 1045 .cursor_move = vmw_du_crtc_cursor_move,
1048 .gamma_set = vmw_du_crtc_gamma_set, 1046 .gamma_set = vmw_du_crtc_gamma_set,
1049 .destroy = vmw_stdu_crtc_destroy, 1047 .destroy = vmw_stdu_crtc_destroy,
@@ -1072,7 +1070,7 @@ static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
1072 vmw_stdu_destroy(vmw_encoder_to_stdu(encoder)); 1070 vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
1073} 1071}
1074 1072
1075static struct drm_encoder_funcs vmw_stdu_encoder_funcs = { 1073static const struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
1076 .destroy = vmw_stdu_encoder_destroy, 1074 .destroy = vmw_stdu_encoder_destroy,
1077}; 1075};
1078 1076
@@ -1099,10 +1097,8 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
1099 1097
1100 1098
1101 1099
1102static struct drm_connector_funcs vmw_stdu_connector_funcs = { 1100static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
1103 .dpms = vmw_du_connector_dpms, 1101 .dpms = vmw_du_connector_dpms,
1104 .save = vmw_du_connector_save,
1105 .restore = vmw_du_connector_restore,
1106 .detect = vmw_du_connector_detect, 1102 .detect = vmw_du_connector_detect,
1107 .fill_modes = vmw_du_connector_fill_modes, 1103 .fill_modes = vmw_du_connector_fill_modes,
1108 .set_property = vmw_du_connector_set_property, 1104 .set_property = vmw_du_connector_set_property,
@@ -1149,7 +1145,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
1149 connector->status = vmw_du_connector_detect(connector, false); 1145 connector->status = vmw_du_connector_detect(connector, false);
1150 1146
1151 drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs, 1147 drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
1152 DRM_MODE_ENCODER_VIRTUAL); 1148 DRM_MODE_ENCODER_VIRTUAL, NULL);
1153 drm_mode_connector_attach_encoder(connector, encoder); 1149 drm_mode_connector_attach_encoder(connector, encoder);
1154 encoder->possible_crtcs = (1 << unit); 1150 encoder->possible_crtcs = (1 << unit);
1155 encoder->possible_clones = 0; 1151 encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7d620e82e000..c2a721a8cef9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -771,7 +771,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
771 } 771 }
772 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), 772 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
773 GFP_KERNEL); 773 GFP_KERNEL);
774 if (unlikely(srf->sizes == NULL)) { 774 if (unlikely(srf->offsets == NULL)) {
775 ret = -ENOMEM; 775 ret = -ENOMEM;
776 goto out_no_offsets; 776 goto out_no_offsets;
777 } 777 }
@@ -815,11 +815,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
815 srf->sizes[0].height == 64 && 815 srf->sizes[0].height == 64 &&
816 srf->format == SVGA3D_A8R8G8B8) { 816 srf->format == SVGA3D_A8R8G8B8) {
817 817
818 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); 818 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
819 /* clear the image */ 819 if (!srf->snooper.image) {
820 if (srf->snooper.image) {
821 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
822 } else {
823 DRM_ERROR("Failed to allocate cursor_image\n"); 820 DRM_ERROR("Failed to allocate cursor_image\n");
824 ret = -ENOMEM; 821 ret = -ENOMEM;
825 goto out_no_copy; 822 goto out_no_copy;
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index c1189f004441..a1d9974cfcb5 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -10,6 +10,7 @@ host1x-y = \
10 mipi.o \ 10 mipi.o \
11 hw/host1x01.o \ 11 hw/host1x01.o \
12 hw/host1x02.o \ 12 hw/host1x02.o \
13 hw/host1x04.o 13 hw/host1x04.o \
14 hw/host1x05.o
14 15
15obj-$(CONFIG_TEGRA_HOST1X) += host1x.o 16obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 4a99c6416e6a..da462afcb225 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -538,6 +538,8 @@ EXPORT_SYMBOL(host1x_driver_register_full);
538 538
539void host1x_driver_unregister(struct host1x_driver *driver) 539void host1x_driver_unregister(struct host1x_driver *driver)
540{ 540{
541 driver_unregister(&driver->driver);
542
541 mutex_lock(&drivers_lock); 543 mutex_lock(&drivers_lock);
542 list_del_init(&driver->list); 544 list_del_init(&driver->list);
543 mutex_unlock(&drivers_lock); 545 mutex_unlock(&drivers_lock);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 53d3d1d45b48..314bf3718cc7 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -35,6 +35,7 @@
35#include "hw/host1x01.h" 35#include "hw/host1x01.h"
36#include "hw/host1x02.h" 36#include "hw/host1x02.h"
37#include "hw/host1x04.h" 37#include "hw/host1x04.h"
38#include "hw/host1x05.h"
38 39
39void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) 40void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
40{ 41{
@@ -87,7 +88,17 @@ static const struct host1x_info host1x04_info = {
87 .sync_offset = 0x2100, 88 .sync_offset = 0x2100,
88}; 89};
89 90
91static const struct host1x_info host1x05_info = {
92 .nb_channels = 14,
93 .nb_pts = 192,
94 .nb_mlocks = 16,
95 .nb_bases = 64,
96 .init = host1x05_init,
97 .sync_offset = 0x2100,
98};
99
90static struct of_device_id host1x_of_match[] = { 100static struct of_device_id host1x_of_match[] = {
101 { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
91 { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, }, 102 { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
92 { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, }, 103 { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
93 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, }, 104 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
@@ -212,6 +223,11 @@ static struct platform_driver tegra_host1x_driver = {
212 .remove = host1x_remove, 223 .remove = host1x_remove,
213}; 224};
214 225
226static struct platform_driver * const drivers[] = {
227 &tegra_host1x_driver,
228 &tegra_mipi_driver,
229};
230
215static int __init tegra_host1x_init(void) 231static int __init tegra_host1x_init(void)
216{ 232{
217 int err; 233 int err;
@@ -220,28 +236,17 @@ static int __init tegra_host1x_init(void)
220 if (err < 0) 236 if (err < 0)
221 return err; 237 return err;
222 238
223 err = platform_driver_register(&tegra_host1x_driver); 239 err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
224 if (err < 0)
225 goto unregister_bus;
226
227 err = platform_driver_register(&tegra_mipi_driver);
228 if (err < 0) 240 if (err < 0)
229 goto unregister_host1x; 241 bus_unregister(&host1x_bus_type);
230 242
231 return 0;
232
233unregister_host1x:
234 platform_driver_unregister(&tegra_host1x_driver);
235unregister_bus:
236 bus_unregister(&host1x_bus_type);
237 return err; 243 return err;
238} 244}
239module_init(tegra_host1x_init); 245module_init(tegra_host1x_init);
240 246
241static void __exit tegra_host1x_exit(void) 247static void __exit tegra_host1x_exit(void)
242{ 248{
243 platform_driver_unregister(&tegra_mipi_driver); 249 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
244 platform_driver_unregister(&tegra_host1x_driver);
245 bus_unregister(&host1x_bus_type); 250 bus_unregister(&host1x_bus_type);
246} 251}
247module_exit(tegra_host1x_exit); 252module_exit(tegra_host1x_exit);
diff --git a/drivers/gpu/host1x/hw/host1x05.c b/drivers/gpu/host1x/hw/host1x05.c
new file mode 100644
index 000000000000..047097ce3bad
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x05.c
@@ -0,0 +1,42 @@
1/*
2 * Host1x init for Tegra210 SoCs
3 *
4 * Copyright (c) 2015 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19/* include hw specification */
20#include "host1x05.h"
21#include "host1x05_hardware.h"
22
23/* include code */
24#include "cdma_hw.c"
25#include "channel_hw.c"
26#include "debug_hw.c"
27#include "intr_hw.c"
28#include "syncpt_hw.c"
29
30#include "../dev.h"
31
32int host1x05_init(struct host1x *host)
33{
34 host->channel_op = &host1x_channel_ops;
35 host->cdma_op = &host1x_cdma_ops;
36 host->cdma_pb_op = &host1x_pushbuffer_ops;
37 host->syncpt_op = &host1x_syncpt_ops;
38 host->intr_op = &host1x_intr_ops;
39 host->debug_op = &host1x_debug_ops;
40
41 return 0;
42}
diff --git a/drivers/gpu/host1x/hw/host1x05.h b/drivers/gpu/host1x/hw/host1x05.h
new file mode 100644
index 000000000000..a306d9c05cd5
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x05.h
@@ -0,0 +1,26 @@
1/*
2 * Host1x init for Tegra210 SoCs
3 *
4 * Copyright (c) 2015 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef HOST1X_HOST1X05_H
20#define HOST1X_HOST1X05_H
21
22struct host1x;
23
24int host1x05_init(struct host1x *host);
25
26#endif
diff --git a/drivers/gpu/host1x/hw/host1x05_hardware.h b/drivers/gpu/host1x/hw/host1x05_hardware.h
new file mode 100644
index 000000000000..2937ebb6be11
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x05_hardware.h
@@ -0,0 +1,142 @@
1/*
2 * Tegra host1x Register Offsets for Tegra210
3 *
4 * Copyright (c) 2015 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_HOST1X05_HARDWARE_H
20#define __HOST1X_HOST1X05_HARDWARE_H
21
22#include <linux/types.h>
23#include <linux/bitops.h>
24
25#include "hw_host1x05_channel.h"
26#include "hw_host1x05_sync.h"
27#include "hw_host1x05_uclass.h"
28
29static inline u32 host1x_class_host_wait_syncpt(
30 unsigned indx, unsigned threshold)
31{
32 return host1x_uclass_wait_syncpt_indx_f(indx)
33 | host1x_uclass_wait_syncpt_thresh_f(threshold);
34}
35
36static inline u32 host1x_class_host_load_syncpt_base(
37 unsigned indx, unsigned threshold)
38{
39 return host1x_uclass_load_syncpt_base_base_indx_f(indx)
40 | host1x_uclass_load_syncpt_base_value_f(threshold);
41}
42
43static inline u32 host1x_class_host_wait_syncpt_base(
44 unsigned indx, unsigned base_indx, unsigned offset)
45{
46 return host1x_uclass_wait_syncpt_base_indx_f(indx)
47 | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
48 | host1x_uclass_wait_syncpt_base_offset_f(offset);
49}
50
51static inline u32 host1x_class_host_incr_syncpt_base(
52 unsigned base_indx, unsigned offset)
53{
54 return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
55 | host1x_uclass_incr_syncpt_base_offset_f(offset);
56}
57
58static inline u32 host1x_class_host_incr_syncpt(
59 unsigned cond, unsigned indx)
60{
61 return host1x_uclass_incr_syncpt_cond_f(cond)
62 | host1x_uclass_incr_syncpt_indx_f(indx);
63}
64
65static inline u32 host1x_class_host_indoff_reg_write(
66 unsigned mod_id, unsigned offset, bool auto_inc)
67{
68 u32 v = host1x_uclass_indoff_indbe_f(0xf)
69 | host1x_uclass_indoff_indmodid_f(mod_id)
70 | host1x_uclass_indoff_indroffset_f(offset);
71 if (auto_inc)
72 v |= host1x_uclass_indoff_autoinc_f(1);
73 return v;
74}
75
76static inline u32 host1x_class_host_indoff_reg_read(
77 unsigned mod_id, unsigned offset, bool auto_inc)
78{
79 u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
80 | host1x_uclass_indoff_indroffset_f(offset)
81 | host1x_uclass_indoff_rwn_read_v();
82 if (auto_inc)
83 v |= host1x_uclass_indoff_autoinc_f(1);
84 return v;
85}
86
87/* cdma opcodes */
88static inline u32 host1x_opcode_setclass(
89 unsigned class_id, unsigned offset, unsigned mask)
90{
91 return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
92}
93
94static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
95{
96 return (1 << 28) | (offset << 16) | count;
97}
98
99static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
100{
101 return (2 << 28) | (offset << 16) | count;
102}
103
104static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
105{
106 return (3 << 28) | (offset << 16) | mask;
107}
108
109static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
110{
111 return (4 << 28) | (offset << 16) | value;
112}
113
114static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
115{
116 return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
117 host1x_class_host_incr_syncpt(cond, indx));
118}
119
120static inline u32 host1x_opcode_restart(unsigned address)
121{
122 return (5 << 28) | (address >> 4);
123}
124
125static inline u32 host1x_opcode_gather(unsigned count)
126{
127 return (6 << 28) | count;
128}
129
130static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
131{
132 return (6 << 28) | (offset << 16) | BIT(15) | count;
133}
134
135static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
136{
137 return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
138}
139
140#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
141
142#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_channel.h b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
new file mode 100644
index 000000000000..fce6e2c1ff4c
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright (c) 2015 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X05_CHANNEL_H
52#define HOST1X_HW_HOST1X05_CHANNEL_H
53
54static inline u32 host1x_channel_fifostat_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_CHANNEL_FIFOSTAT \
59 host1x_channel_fifostat_r()
60static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
61{
62 return (r >> 11) & 0x1;
63}
64#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
65 host1x_channel_fifostat_cfempty_v(r)
66static inline u32 host1x_channel_dmastart_r(void)
67{
68 return 0x14;
69}
70#define HOST1X_CHANNEL_DMASTART \
71 host1x_channel_dmastart_r()
72static inline u32 host1x_channel_dmaput_r(void)
73{
74 return 0x18;
75}
76#define HOST1X_CHANNEL_DMAPUT \
77 host1x_channel_dmaput_r()
78static inline u32 host1x_channel_dmaget_r(void)
79{
80 return 0x1c;
81}
82#define HOST1X_CHANNEL_DMAGET \
83 host1x_channel_dmaget_r()
84static inline u32 host1x_channel_dmaend_r(void)
85{
86 return 0x20;
87}
88#define HOST1X_CHANNEL_DMAEND \
89 host1x_channel_dmaend_r()
90static inline u32 host1x_channel_dmactrl_r(void)
91{
92 return 0x24;
93}
94#define HOST1X_CHANNEL_DMACTRL \
95 host1x_channel_dmactrl_r()
96static inline u32 host1x_channel_dmactrl_dmastop(void)
97{
98 return 1 << 0;
99}
100#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
101 host1x_channel_dmactrl_dmastop()
102static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
103{
104 return (r >> 0) & 0x1;
105}
106#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
107 host1x_channel_dmactrl_dmastop_v(r)
108static inline u32 host1x_channel_dmactrl_dmagetrst(void)
109{
110 return 1 << 1;
111}
112#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
113 host1x_channel_dmactrl_dmagetrst()
114static inline u32 host1x_channel_dmactrl_dmainitget(void)
115{
116 return 1 << 2;
117}
118#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
119 host1x_channel_dmactrl_dmainitget()
120
121#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_sync.h b/drivers/gpu/host1x/hw/hw_host1x05_sync.h
new file mode 100644
index 000000000000..ca10eee5045c
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x05_sync.h
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2015 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X05_SYNC_H
52#define HOST1X_HW_HOST1X05_SYNC_H
53
54#define REGISTER_STRIDE 4
55
56static inline u32 host1x_sync_syncpt_r(unsigned int id)
57{
58 return 0xf80 + id * REGISTER_STRIDE;
59}
60#define HOST1X_SYNC_SYNCPT(id) \
61 host1x_sync_syncpt_r(id)
62static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
63{
64 return 0xe80 + id * REGISTER_STRIDE;
65}
66#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
67 host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
68static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
69{
70 return 0xf00 + id * REGISTER_STRIDE;
71}
72#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
73 host1x_sync_syncpt_thresh_int_disable_r(id)
74static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
75{
76 return 0xf20 + id * REGISTER_STRIDE;
77}
78#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
79 host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
80static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
81{
82 return 0xc00 + channel * REGISTER_STRIDE;
83}
84#define HOST1X_SYNC_CF_SETUP(channel) \
85 host1x_sync_cf_setup_r(channel)
86static inline u32 host1x_sync_cf_setup_base_v(u32 r)
87{
88 return (r >> 0) & 0x3ff;
89}
90#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
91 host1x_sync_cf_setup_base_v(r)
92static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
93{
94 return (r >> 16) & 0x3ff;
95}
96#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
97 host1x_sync_cf_setup_limit_v(r)
98static inline u32 host1x_sync_cmdproc_stop_r(void)
99{
100 return 0xac;
101}
102#define HOST1X_SYNC_CMDPROC_STOP \
103 host1x_sync_cmdproc_stop_r()
104static inline u32 host1x_sync_ch_teardown_r(void)
105{
106 return 0xb0;
107}
108#define HOST1X_SYNC_CH_TEARDOWN \
109 host1x_sync_ch_teardown_r()
110static inline u32 host1x_sync_usec_clk_r(void)
111{
112 return 0x1a4;
113}
114#define HOST1X_SYNC_USEC_CLK \
115 host1x_sync_usec_clk_r()
116static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
117{
118 return 0x1a8;
119}
120#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
121 host1x_sync_ctxsw_timeout_cfg_r()
122static inline u32 host1x_sync_ip_busy_timeout_r(void)
123{
124 return 0x1bc;
125}
126#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
127 host1x_sync_ip_busy_timeout_r()
128static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
129{
130 return 0x340 + id * REGISTER_STRIDE;
131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_v(u32 r)
135{
136 return (r >> 8) & 0xf;
137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_V(v) \
139 host1x_sync_mlock_owner_chid_v(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{
142 return (r >> 1) & 0x1;
143}
144#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
145 host1x_sync_mlock_owner_cpu_owns_v(r)
146static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
147{
148 return (r >> 0) & 0x1;
149}
150#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
151 host1x_sync_mlock_owner_ch_owns_v(r)
152static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
153{
154 return 0x1380 + id * REGISTER_STRIDE;
155}
156#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
157 host1x_sync_syncpt_int_thresh_r(id)
158static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
159{
160 return 0x600 + id * REGISTER_STRIDE;
161}
162#define HOST1X_SYNC_SYNCPT_BASE(id) \
163 host1x_sync_syncpt_base_r(id)
164static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
165{
166 return 0xf60 + id * REGISTER_STRIDE;
167}
168#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
169 host1x_sync_syncpt_cpu_incr_r(id)
170static inline u32 host1x_sync_cbread_r(unsigned int channel)
171{
172 return 0xc80 + channel * REGISTER_STRIDE;
173}
174#define HOST1X_SYNC_CBREAD(channel) \
175 host1x_sync_cbread_r(channel)
176static inline u32 host1x_sync_cfpeek_ctrl_r(void)
177{
178 return 0x74c;
179}
180#define HOST1X_SYNC_CFPEEK_CTRL \
181 host1x_sync_cfpeek_ctrl_r()
182static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
183{
184 return (v & 0x3ff) << 0;
185}
186#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
187 host1x_sync_cfpeek_ctrl_addr_f(v)
188static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
189{
190 return (v & 0xf) << 16;
191}
192#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
193 host1x_sync_cfpeek_ctrl_channr_f(v)
194static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
195{
196 return (v & 0x1) << 31;
197}
198#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
199 host1x_sync_cfpeek_ctrl_ena_f(v)
200static inline u32 host1x_sync_cfpeek_read_r(void)
201{
202 return 0x750;
203}
204#define HOST1X_SYNC_CFPEEK_READ \
205 host1x_sync_cfpeek_read_r()
206static inline u32 host1x_sync_cfpeek_ptrs_r(void)
207{
208 return 0x754;
209}
210#define HOST1X_SYNC_CFPEEK_PTRS \
211 host1x_sync_cfpeek_ptrs_r()
212static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
213{
214 return (r >> 0) & 0x3ff;
215}
216#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
217 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
218static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
219{
220 return (r >> 16) & 0x3ff;
221}
222#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
223 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
224static inline u32 host1x_sync_cbstat_r(unsigned int channel)
225{
226 return 0xcc0 + channel * REGISTER_STRIDE;
227}
228#define HOST1X_SYNC_CBSTAT(channel) \
229 host1x_sync_cbstat_r(channel)
230static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
231{
232 return (r >> 0) & 0xffff;
233}
234#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
235 host1x_sync_cbstat_cboffset_v(r)
236static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
237{
238 return (r >> 16) & 0x3ff;
239}
240#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
241 host1x_sync_cbstat_cbclass_v(r)
242
243#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_uclass.h b/drivers/gpu/host1x/hw/hw_host1x05_uclass.h
new file mode 100644
index 000000000000..0c411da6bc41
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x05_uclass.h
@@ -0,0 +1,181 @@
1/*
2 * Copyright (c) 2015 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X05_UCLASS_H
52#define HOST1X_HW_HOST1X05_UCLASS_H
53
54static inline u32 host1x_uclass_incr_syncpt_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_UCLASS_INCR_SYNCPT \
59 host1x_uclass_incr_syncpt_r()
60static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
61{
62 return (v & 0xff) << 8;
63}
64#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
65 host1x_uclass_incr_syncpt_cond_f(v)
66static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
67{
68 return (v & 0xff) << 0;
69}
70#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
71 host1x_uclass_incr_syncpt_indx_f(v)
72static inline u32 host1x_uclass_wait_syncpt_r(void)
73{
74 return 0x8;
75}
76#define HOST1X_UCLASS_WAIT_SYNCPT \
77 host1x_uclass_wait_syncpt_r()
78static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
79{
80 return (v & 0xff) << 24;
81}
82#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
83 host1x_uclass_wait_syncpt_indx_f(v)
84static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
85{
86 return (v & 0xffffff) << 0;
87}
88#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
89 host1x_uclass_wait_syncpt_thresh_f(v)
90static inline u32 host1x_uclass_wait_syncpt_base_r(void)
91{
92 return 0x9;
93}
94#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
95 host1x_uclass_wait_syncpt_base_r()
96static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
97{
98 return (v & 0xff) << 24;
99}
100#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
101 host1x_uclass_wait_syncpt_base_indx_f(v)
102static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
103{
104 return (v & 0xff) << 16;
105}
106#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
107 host1x_uclass_wait_syncpt_base_base_indx_f(v)
108static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
109{
110 return (v & 0xffff) << 0;
111}
112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
113 host1x_uclass_wait_syncpt_base_offset_f(v)
114static inline u32 host1x_uclass_load_syncpt_base_r(void)
115{
116 return 0xb;
117}
118#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
119 host1x_uclass_load_syncpt_base_r()
120static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
121{
122 return (v & 0xff) << 24;
123}
124#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
125 host1x_uclass_load_syncpt_base_base_indx_f(v)
126static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
127{
128 return (v & 0xffffff) << 0;
129}
130#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
131 host1x_uclass_load_syncpt_base_value_f(v)
132static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
133{
134 return (v & 0xff) << 24;
135}
136#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
137 host1x_uclass_incr_syncpt_base_base_indx_f(v)
138static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
139{
140 return (v & 0xffffff) << 0;
141}
142#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
143 host1x_uclass_incr_syncpt_base_offset_f(v)
144static inline u32 host1x_uclass_indoff_r(void)
145{
146 return 0x2d;
147}
148#define HOST1X_UCLASS_INDOFF \
149 host1x_uclass_indoff_r()
150static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
151{
152 return (v & 0xf) << 28;
153}
154#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
155 host1x_uclass_indoff_indbe_f(v)
156static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
157{
158 return (v & 0x1) << 27;
159}
160#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
161 host1x_uclass_indoff_autoinc_f(v)
162static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
163{
164 return (v & 0xff) << 18;
165}
166#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
167 host1x_uclass_indoff_indmodid_f(v)
168static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
169{
170 return (v & 0xffff) << 2;
171}
172#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
173 host1x_uclass_indoff_indroffset_f(v)
174static inline u32 host1x_uclass_indoff_rwn_read_v(void)
175{
176 return 1;
177}
178#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
179 host1x_uclass_indoff_indroffset_f(v)
180
181#endif
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 41edd5a3f100..d64d9058bce5 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -63,7 +63,7 @@
63 * for the inactive GPU.) Also, muxes are often used to cut power to the 63 * for the inactive GPU.) Also, muxes are often used to cut power to the
64 * discrete GPU while it is not used. 64 * discrete GPU while it is not used.
65 * 65 *
66 * DRM drivers register GPUs with vga_switcheroo, these are heretoforth called 66 * DRM drivers register GPUs with vga_switcheroo, these are henceforth called
67 * clients. The mux is called the handler. Muxless machines also register a 67 * clients. The mux is called the handler. Muxless machines also register a
68 * handler to control the power state of the discrete GPU, its ->switchto 68 * handler to control the power state of the discrete GPU, its ->switchto
69 * callback is a no-op for obvious reasons. The discrete GPU is often equipped 69 * callback is a no-op for obvious reasons. The discrete GPU is often equipped
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 3166e4bc4eb6..9abcaa53bd25 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -395,8 +395,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
395 set_current_state(interruptible ? 395 set_current_state(interruptible ?
396 TASK_INTERRUPTIBLE : 396 TASK_INTERRUPTIBLE :
397 TASK_UNINTERRUPTIBLE); 397 TASK_UNINTERRUPTIBLE);
398 if (signal_pending(current)) { 398 if (interruptible && signal_pending(current)) {
399 rc = -EINTR; 399 __set_current_state(TASK_RUNNING);
400 remove_wait_queue(&vga_wait_queue, &wait);
401 rc = -ERESTARTSYS;
400 break; 402 break;
401 } 403 }
402 schedule(); 404 schedule();
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9024a3de4032..8b78a7f1f779 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -316,11 +316,6 @@
316#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 316#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
317 317
318#define USB_VENDOR_ID_ELAN 0x04f3 318#define USB_VENDOR_ID_ELAN 0x04f3
319#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
320#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
321#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
322#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c
323#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
324 319
325#define USB_VENDOR_ID_ELECOM 0x056e 320#define USB_VENDOR_ID_ELECOM 0x056e
326#define USB_DEVICE_ID_ELECOM_BM084 0x0061 321#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2324520b006d..7dd0953cd70f 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,11 +72,7 @@ static const struct hid_blacklist {
72 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 72 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
73 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 73 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
74 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 74 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, 75 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
76 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
77 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
78 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL },
79 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
80 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 76 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
82 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
@@ -340,7 +336,8 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
340 336
341 for (; hid_blacklist[n].idVendor; n++) 337 for (; hid_blacklist[n].idVendor; n++)
342 if (hid_blacklist[n].idVendor == idVendor && 338 if (hid_blacklist[n].idVendor == idVendor &&
343 hid_blacklist[n].idProduct == idProduct) 339 (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
340 hid_blacklist[n].idProduct == idProduct))
344 bl_entry = &hid_blacklist[n]; 341 bl_entry = &hid_blacklist[n];
345 342
346 if (bl_entry != NULL) 343 if (bl_entry != NULL)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8f59f057cdf4..80a73bfc1a65 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1217,6 +1217,7 @@ config SENSORS_PWM_FAN
1217config SENSORS_SHT15 1217config SENSORS_SHT15
1218 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." 1218 tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
1219 depends on GPIOLIB || COMPILE_TEST 1219 depends on GPIOLIB || COMPILE_TEST
1220 select BITREVERSE
1220 help 1221 help
1221 If you say yes here you get support for the Sensiron SHT10, SHT11, 1222 If you say yes here you get support for the Sensiron SHT10, SHT11,
1222 SHT15, SHT71, SHT75 humidity and temperature sensors. 1223 SHT15, SHT71, SHT75 humidity and temperature sensors.
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 65482624ea2c..5289aa0980a8 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -58,6 +58,7 @@ struct tmp102 {
58 u16 config_orig; 58 u16 config_orig;
59 unsigned long last_update; 59 unsigned long last_update;
60 int temp[3]; 60 int temp[3];
61 bool first_time;
61}; 62};
62 63
63/* convert left adjusted 13-bit TMP102 register value to milliCelsius */ 64/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -93,6 +94,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev)
93 tmp102->temp[i] = tmp102_reg_to_mC(status); 94 tmp102->temp[i] = tmp102_reg_to_mC(status);
94 } 95 }
95 tmp102->last_update = jiffies; 96 tmp102->last_update = jiffies;
97 tmp102->first_time = false;
96 } 98 }
97 mutex_unlock(&tmp102->lock); 99 mutex_unlock(&tmp102->lock);
98 return tmp102; 100 return tmp102;
@@ -102,6 +104,12 @@ static int tmp102_read_temp(void *dev, int *temp)
102{ 104{
103 struct tmp102 *tmp102 = tmp102_update_device(dev); 105 struct tmp102 *tmp102 = tmp102_update_device(dev);
104 106
107 /* Is it too early even to return a conversion? */
108 if (tmp102->first_time) {
109 dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
110 return -EAGAIN;
111 }
112
105 *temp = tmp102->temp[0]; 113 *temp = tmp102->temp[0];
106 114
107 return 0; 115 return 0;
@@ -114,6 +122,10 @@ static ssize_t tmp102_show_temp(struct device *dev,
114 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); 122 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
115 struct tmp102 *tmp102 = tmp102_update_device(dev); 123 struct tmp102 *tmp102 = tmp102_update_device(dev);
116 124
125 /* Is it too early even to return a read? */
126 if (tmp102->first_time)
127 return -EAGAIN;
128
117 return sprintf(buf, "%d\n", tmp102->temp[sda->index]); 129 return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
118} 130}
119 131
@@ -207,7 +219,9 @@ static int tmp102_probe(struct i2c_client *client,
207 status = -ENODEV; 219 status = -ENODEV;
208 goto fail_restore_config; 220 goto fail_restore_config;
209 } 221 }
210 tmp102->last_update = jiffies - HZ; 222 tmp102->last_update = jiffies;
223 /* Mark that we are not ready with data until conversion is complete */
224 tmp102->first_time = true;
211 mutex_init(&tmp102->lock); 225 mutex_init(&tmp102->lock);
212 226
213 hwmon_dev = hwmon_device_register_with_groups(dev, client->name, 227 hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index c5628a42170a..a8bdcb5292f5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -202,8 +202,15 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
202 * d is always 6 on Keystone I2C controller 202 * d is always 6 on Keystone I2C controller
203 */ 203 */
204 204
205 /* get minimum of 7 MHz clock, but max of 12 MHz */ 205 /*
206 psc = (input_clock / 7000000) - 1; 206 * Both Davinci and current Keystone User Guides recommend a value
207 * between 7MHz and 12MHz. In reality 7MHz module clock doesn't
208 * always produce enough margin between SDA and SCL transitions.
209 * Measurements show that the higher the module clock is, the
210 * bigger is the margin, providing more reliable communication.
211 * So we better target for 12MHz.
212 */
213 psc = (input_clock / 12000000) - 1;
207 if ((input_clock / (psc + 1)) > 12000000) 214 if ((input_clock / (psc + 1)) > 12000000)
208 psc++; /* better to run under spec than over */ 215 psc++; /* better to run under spec than over */
209 d = (psc >= 2) ? 5 : 7 - psc; 216 d = (psc >= 2) ? 5 : 7 - psc;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 8c48b27ba059..de7fbbb374cd 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -813,6 +813,12 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
813tx_aborted: 813tx_aborted:
814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) 814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
815 complete(&dev->cmd_complete); 815 complete(&dev->cmd_complete);
816 else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
817 /* workaround to trigger pending interrupt */
818 stat = dw_readl(dev, DW_IC_INTR_MASK);
819 i2c_dw_disable_int(dev);
820 dw_writel(dev, stat, DW_IC_INTR_MASK);
821 }
816 822
817 return IRQ_HANDLED; 823 return IRQ_HANDLED;
818} 824}
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 1d50898e7b24..9ffb63a60f95 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -111,6 +111,7 @@ struct dw_i2c_dev {
111 111
112#define ACCESS_SWAP 0x00000001 112#define ACCESS_SWAP 0x00000001
113#define ACCESS_16BIT 0x00000002 113#define ACCESS_16BIT 0x00000002
114#define ACCESS_INTR_MASK 0x00000004
114 115
115extern int i2c_dw_init(struct dw_i2c_dev *dev); 116extern int i2c_dw_init(struct dw_i2c_dev *dev);
116extern void i2c_dw_disable(struct dw_i2c_dev *dev); 117extern void i2c_dw_disable(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 809579ecb5a4..6b00061c3746 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -93,6 +93,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
93static int dw_i2c_acpi_configure(struct platform_device *pdev) 93static int dw_i2c_acpi_configure(struct platform_device *pdev)
94{ 94{
95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
96 const struct acpi_device_id *id;
96 97
97 dev->adapter.nr = -1; 98 dev->adapter.nr = -1;
98 dev->tx_fifo_depth = 32; 99 dev->tx_fifo_depth = 32;
@@ -106,6 +107,10 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
106 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, 107 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
107 &dev->sda_hold_time); 108 &dev->sda_hold_time);
108 109
110 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
111 if (id && id->driver_data)
112 dev->accessor_flags |= (u32)id->driver_data;
113
109 return 0; 114 return 0;
110} 115}
111 116
@@ -116,7 +121,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
116 { "INT3433", 0 }, 121 { "INT3433", 0 },
117 { "80860F41", 0 }, 122 { "80860F41", 0 },
118 { "808622C1", 0 }, 123 { "808622C1", 0 },
119 { "AMD0010", 0 }, 124 { "AMD0010", ACCESS_INTR_MASK },
120 { } 125 { }
121}; 126};
122MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); 127MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
@@ -240,12 +245,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
240 } 245 }
241 246
242 r = i2c_dw_probe(dev); 247 r = i2c_dw_probe(dev);
243 if (r) { 248 if (r && !dev->pm_runtime_disabled)
244 pm_runtime_disable(&pdev->dev); 249 pm_runtime_disable(&pdev->dev);
245 return r;
246 }
247 250
248 return 0; 251 return r;
249} 252}
250 253
251static int dw_i2c_plat_remove(struct platform_device *pdev) 254static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -260,7 +263,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
260 263
261 pm_runtime_dont_use_autosuspend(&pdev->dev); 264 pm_runtime_dont_use_autosuspend(&pdev->dev);
262 pm_runtime_put_sync(&pdev->dev); 265 pm_runtime_put_sync(&pdev->dev);
263 pm_runtime_disable(&pdev->dev); 266 if (!dev->pm_runtime_disabled)
267 pm_runtime_disable(&pdev->dev);
264 268
265 return 0; 269 return 0;
266} 270}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 9bb0b056b25f..d4d853680ae4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1119,6 +1119,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
1119 i2c_imx, IMX_I2C_I2CR); 1119 i2c_imx, IMX_I2C_I2CR);
1120 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); 1120 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
1121 1121
1122 i2c_imx_init_recovery_info(i2c_imx, pdev);
1123
1122 /* Add I2C adapter */ 1124 /* Add I2C adapter */
1123 ret = i2c_add_numbered_adapter(&i2c_imx->adapter); 1125 ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
1124 if (ret < 0) { 1126 if (ret < 0) {
@@ -1126,8 +1128,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
1126 goto clk_disable; 1128 goto clk_disable;
1127 } 1129 }
1128 1130
1129 i2c_imx_init_recovery_info(i2c_imx, pdev);
1130
1131 /* Set up platform driver data */ 1131 /* Set up platform driver data */
1132 platform_set_drvdata(pdev, i2c_imx); 1132 platform_set_drvdata(pdev, i2c_imx);
1133 clk_disable_unprepare(i2c_imx->clk); 1133 clk_disable_unprepare(i2c_imx->clk);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5801227b97ab..43207f52e5a3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -146,6 +146,8 @@ struct mv64xxx_i2c_data {
146 bool errata_delay; 146 bool errata_delay;
147 struct reset_control *rstc; 147 struct reset_control *rstc;
148 bool irq_clear_inverted; 148 bool irq_clear_inverted;
149 /* Clk div is 2 to the power n, not 2 to the power n + 1 */
150 bool clk_n_base_0;
149}; 151};
150 152
151static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { 153static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -757,25 +759,29 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
757#ifdef CONFIG_OF 759#ifdef CONFIG_OF
758#ifdef CONFIG_HAVE_CLK 760#ifdef CONFIG_HAVE_CLK
759static int 761static int
760mv64xxx_calc_freq(const int tclk, const int n, const int m) 762mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data,
763 const int tclk, const int n, const int m)
761{ 764{
762 return tclk / (10 * (m + 1) * (2 << n)); 765 if (drv_data->clk_n_base_0)
766 return tclk / (10 * (m + 1) * (1 << n));
767 else
768 return tclk / (10 * (m + 1) * (2 << n));
763} 769}
764 770
765static bool 771static bool
766mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, 772mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data,
767 u32 *best_m) 773 const u32 req_freq, const u32 tclk)
768{ 774{
769 int freq, delta, best_delta = INT_MAX; 775 int freq, delta, best_delta = INT_MAX;
770 int m, n; 776 int m, n;
771 777
772 for (n = 0; n <= 7; n++) 778 for (n = 0; n <= 7; n++)
773 for (m = 0; m <= 15; m++) { 779 for (m = 0; m <= 15; m++) {
774 freq = mv64xxx_calc_freq(tclk, n, m); 780 freq = mv64xxx_calc_freq(drv_data, tclk, n, m);
775 delta = req_freq - freq; 781 delta = req_freq - freq;
776 if (delta >= 0 && delta < best_delta) { 782 if (delta >= 0 && delta < best_delta) {
777 *best_m = m; 783 drv_data->freq_m = m;
778 *best_n = n; 784 drv_data->freq_n = n;
779 best_delta = delta; 785 best_delta = delta;
780 } 786 }
781 if (best_delta == 0) 787 if (best_delta == 0)
@@ -813,8 +819,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
813 if (of_property_read_u32(np, "clock-frequency", &bus_freq)) 819 if (of_property_read_u32(np, "clock-frequency", &bus_freq))
814 bus_freq = 100000; /* 100kHz by default */ 820 bus_freq = 100000; /* 100kHz by default */
815 821
816 if (!mv64xxx_find_baud_factors(bus_freq, tclk, 822 if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") ||
817 &drv_data->freq_n, &drv_data->freq_m)) { 823 of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
824 drv_data->clk_n_base_0 = true;
825
826 if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) {
818 rc = -EINVAL; 827 rc = -EINVAL;
819 goto out; 828 goto out;
820 } 829 }
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index b0ae560b38c3..599c0d7bd906 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -576,7 +576,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
576 if (slave->flags & I2C_CLIENT_TEN) 576 if (slave->flags & I2C_CLIENT_TEN)
577 return -EAFNOSUPPORT; 577 return -EAFNOSUPPORT;
578 578
579 pm_runtime_forbid(rcar_i2c_priv_to_dev(priv)); 579 pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv));
580 580
581 priv->slave = slave; 581 priv->slave = slave;
582 rcar_i2c_write(priv, ICSAR, slave->addr); 582 rcar_i2c_write(priv, ICSAR, slave->addr);
@@ -598,7 +598,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
598 598
599 priv->slave = NULL; 599 priv->slave = NULL;
600 600
601 pm_runtime_allow(rcar_i2c_priv_to_dev(priv)); 601 pm_runtime_put(rcar_i2c_priv_to_dev(priv));
602 602
603 return 0; 603 return 0;
604} 604}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index c1935ebd6a9c..9096d17beb5b 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -908,7 +908,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
908 &i2c->scl_fall_ns)) 908 &i2c->scl_fall_ns))
909 i2c->scl_fall_ns = 300; 909 i2c->scl_fall_ns = 300;
910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", 910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
911 &i2c->scl_fall_ns)) 911 &i2c->sda_fall_ns))
912 i2c->sda_fall_ns = i2c->scl_fall_ns; 912 i2c->sda_fall_ns = i2c->scl_fall_ns;
913 913
914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); 914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index ea72dca32fdf..25020ec777c9 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -822,7 +822,7 @@ static int st_i2c_probe(struct platform_device *pdev)
822 822
823 adap = &i2c_dev->adap; 823 adap = &i2c_dev->adap;
824 i2c_set_adapdata(adap, i2c_dev); 824 i2c_set_adapdata(adap, i2c_dev);
825 snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); 825 snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start);
826 adap->owner = THIS_MODULE; 826 adap->owner = THIS_MODULE;
827 adap->timeout = 2 * HZ; 827 adap->timeout = 2 * HZ;
828 adap->retries = 0; 828 adap->retries = 0;
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 0c4618b4d515..c2babe50a0d8 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -839,8 +839,10 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
839 839
840 for_each_available_child_of_node(node, child) { 840 for_each_available_child_of_node(node, child) {
841 ret = vadc_get_dt_channel_data(vadc->dev, &prop, child); 841 ret = vadc_get_dt_channel_data(vadc->dev, &prop, child);
842 if (ret) 842 if (ret) {
843 of_node_put(child);
843 return ret; 844 return ret;
845 }
844 846
845 vadc->chan_props[index] = prop; 847 vadc->chan_props[index] = prop;
846 848
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d7e908acb480..0f6f63b20263 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -302,7 +302,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
302 if (trialmask == NULL) 302 if (trialmask == NULL)
303 return -ENOMEM; 303 return -ENOMEM;
304 if (!indio_dev->masklength) { 304 if (!indio_dev->masklength) {
305 WARN_ON("Trying to set scanmask prior to registering buffer\n"); 305 WARN(1, "Trying to set scanmask prior to registering buffer\n");
306 goto err_invalid_mask; 306 goto err_invalid_mask;
307 } 307 }
308 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); 308 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 208358f9e7e3..159ede61f793 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -655,7 +655,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
655 break; 655 break;
656 case IIO_SEPARATE: 656 case IIO_SEPARATE:
657 if (!chan->indexed) { 657 if (!chan->indexed) {
658 WARN_ON("Differential channels must be indexed\n"); 658 WARN(1, "Differential channels must be indexed\n");
659 ret = -EINVAL; 659 ret = -EINVAL;
660 goto error_free_full_postfix; 660 goto error_free_full_postfix;
661 } 661 }
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 7d269ef9e062..f6a07dc32ae4 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -453,6 +453,7 @@ static int apds9960_set_power_state(struct apds9960_data *data, bool on)
453 usleep_range(data->als_adc_int_us, 453 usleep_range(data->als_adc_int_us,
454 APDS9960_MAX_INT_TIME_IN_US); 454 APDS9960_MAX_INT_TIME_IN_US);
455 } else { 455 } else {
456 pm_runtime_mark_last_busy(dev);
456 ret = pm_runtime_put_autosuspend(dev); 457 ret = pm_runtime_put_autosuspend(dev);
457 } 458 }
458 459
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 961f9f990faf..e544fcfd5ced 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -130,10 +130,10 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
130 if (ret < 0) 130 if (ret < 0)
131 break; 131 break;
132 132
133 /* return 0 since laser is likely pointed out of range */ 133 /* return -EINVAL since laser is likely pointed out of range */
134 if (ret & LIDAR_REG_STATUS_INVALID) { 134 if (ret & LIDAR_REG_STATUS_INVALID) {
135 *reg = 0; 135 *reg = 0;
136 ret = 0; 136 ret = -EINVAL;
137 break; 137 break;
138 } 138 }
139 139
@@ -197,7 +197,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
197 if (!ret) { 197 if (!ret) {
198 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, 198 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
199 iio_get_time_ns()); 199 iio_get_time_ns());
200 } else { 200 } else if (ret != -EINVAL) {
201 dev_err(&data->client->dev, "cannot read LIDAR measurement"); 201 dev_err(&data->client->dev, "cannot read LIDAR measurement");
202 } 202 }
203 203
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 944cd90417bc..d2d5d004f16d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1126,10 +1126,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
1126 1126
1127 rcu_read_lock(); 1127 rcu_read_lock();
1128 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); 1128 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1129 if (err) 1129 ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1130 return false;
1131
1132 ret = FIB_RES_DEV(res) == net_dev;
1133 rcu_read_unlock(); 1130 rcu_read_unlock();
1134 1131
1135 return ret; 1132 return ret;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d8af7a41a30..2281de122038 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1811 if (qp_num == 0) 1811 if (qp_num == 0)
1812 valid = 1; 1812 valid = 1;
1813 } else { 1813 } else {
1814 /* CM attributes other than ClassPortInfo only use Send method */
1815 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1816 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1817 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1818 goto out;
1814 /* Filter GSI packets sent to QP0 */ 1819 /* Filter GSI packets sent to QP0 */
1815 if (qp_num != 0) 1820 if (qp_num != 0)
1816 valid = 1; 1821 valid = 1;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 2aba774f835b..a95a32ba596e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -512,7 +512,7 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
512 return len; 512 return len;
513} 513}
514 514
515static int ib_nl_send_msg(struct ib_sa_query *query) 515static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
516{ 516{
517 struct sk_buff *skb = NULL; 517 struct sk_buff *skb = NULL;
518 struct nlmsghdr *nlh; 518 struct nlmsghdr *nlh;
@@ -526,7 +526,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
526 if (len <= 0) 526 if (len <= 0)
527 return -EMSGSIZE; 527 return -EMSGSIZE;
528 528
529 skb = nlmsg_new(len, GFP_KERNEL); 529 skb = nlmsg_new(len, gfp_mask);
530 if (!skb) 530 if (!skb)
531 return -ENOMEM; 531 return -ENOMEM;
532 532
@@ -544,7 +544,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
544 /* Repair the nlmsg header length */ 544 /* Repair the nlmsg header length */
545 nlmsg_end(skb, nlh); 545 nlmsg_end(skb, nlh);
546 546
547 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL); 547 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
548 if (!ret) 548 if (!ret)
549 ret = len; 549 ret = len;
550 else 550 else
@@ -553,7 +553,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
553 return ret; 553 return ret;
554} 554}
555 555
556static int ib_nl_make_request(struct ib_sa_query *query) 556static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
557{ 557{
558 unsigned long flags; 558 unsigned long flags;
559 unsigned long delay; 559 unsigned long delay;
@@ -562,25 +562,27 @@ static int ib_nl_make_request(struct ib_sa_query *query)
562 INIT_LIST_HEAD(&query->list); 562 INIT_LIST_HEAD(&query->list);
563 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); 563 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
564 564
565 /* Put the request on the list first.*/
565 spin_lock_irqsave(&ib_nl_request_lock, flags); 566 spin_lock_irqsave(&ib_nl_request_lock, flags);
566 ret = ib_nl_send_msg(query);
567 if (ret <= 0) {
568 ret = -EIO;
569 goto request_out;
570 } else {
571 ret = 0;
572 }
573
574 delay = msecs_to_jiffies(sa_local_svc_timeout_ms); 567 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
575 query->timeout = delay + jiffies; 568 query->timeout = delay + jiffies;
576 list_add_tail(&query->list, &ib_nl_request_list); 569 list_add_tail(&query->list, &ib_nl_request_list);
577 /* Start the timeout if this is the only request */ 570 /* Start the timeout if this is the only request */
578 if (ib_nl_request_list.next == &query->list) 571 if (ib_nl_request_list.next == &query->list)
579 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 572 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
580
581request_out:
582 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 573 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
583 574
575 ret = ib_nl_send_msg(query, gfp_mask);
576 if (ret <= 0) {
577 ret = -EIO;
578 /* Remove the request */
579 spin_lock_irqsave(&ib_nl_request_lock, flags);
580 list_del(&query->list);
581 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
582 } else {
583 ret = 0;
584 }
585
584 return ret; 586 return ret;
585} 587}
586 588
@@ -1108,7 +1110,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
1108 1110
1109 if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) { 1111 if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
1110 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) { 1112 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
1111 if (!ib_nl_make_request(query)) 1113 if (!ib_nl_make_request(query, gfp_mask))
1112 return id; 1114 return id;
1113 } 1115 }
1114 ib_sa_disable_local_svc(query); 1116 ib_sa_disable_local_svc(query);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
62 * The ib_uobject locking scheme is as follows: 62 * The ib_uobject locking scheme is as follows:
63 * 63 *
64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65 * needs to be held during all idr operations. When an object is 65 * needs to be held during all idr write operations. When an object is
66 * looked up, a reference must be taken on the object's kref before 66 * looked up, a reference must be taken on the object's kref before
67 * dropping this lock. 67 * dropping this lock. For read operations, the rcu_read_lock()
68 * and rcu_write_lock() but similarly the kref reference is grabbed
69 * before the rcu_read_unlock().
68 * 70 *
69 * - Each object also has an rwsem. This rwsem must be held for 71 * - Each object also has an rwsem. This rwsem must be held for
70 * reading while an operation that uses the object is performed. 72 * reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
96 98
97static void release_uobj(struct kref *kref) 99static void release_uobj(struct kref *kref)
98{ 100{
99 kfree(container_of(kref, struct ib_uobject, ref)); 101 kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
100} 102}
101 103
102static void put_uobj(struct ib_uobject *uobj) 104static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
145{ 147{
146 struct ib_uobject *uobj; 148 struct ib_uobject *uobj;
147 149
148 spin_lock(&ib_uverbs_idr_lock); 150 rcu_read_lock();
149 uobj = idr_find(idr, id); 151 uobj = idr_find(idr, id);
150 if (uobj) { 152 if (uobj) {
151 if (uobj->context == context) 153 if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
153 else 155 else
154 uobj = NULL; 156 uobj = NULL;
155 } 157 }
156 spin_unlock(&ib_uverbs_idr_lock); 158 rcu_read_unlock();
157 159
158 return uobj; 160 return uobj;
159} 161}
@@ -2446,6 +2448,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2446 int i, sg_ind; 2448 int i, sg_ind;
2447 int is_ud; 2449 int is_ud;
2448 ssize_t ret = -EINVAL; 2450 ssize_t ret = -EINVAL;
2451 size_t next_size;
2449 2452
2450 if (copy_from_user(&cmd, buf, sizeof cmd)) 2453 if (copy_from_user(&cmd, buf, sizeof cmd))
2451 return -EFAULT; 2454 return -EFAULT;
@@ -2490,7 +2493,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2490 goto out_put; 2493 goto out_put;
2491 } 2494 }
2492 2495
2493 ud = alloc_wr(sizeof(*ud), user_wr->num_sge); 2496 next_size = sizeof(*ud);
2497 ud = alloc_wr(next_size, user_wr->num_sge);
2494 if (!ud) { 2498 if (!ud) {
2495 ret = -ENOMEM; 2499 ret = -ENOMEM;
2496 goto out_put; 2500 goto out_put;
@@ -2511,7 +2515,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2511 user_wr->opcode == IB_WR_RDMA_READ) { 2515 user_wr->opcode == IB_WR_RDMA_READ) {
2512 struct ib_rdma_wr *rdma; 2516 struct ib_rdma_wr *rdma;
2513 2517
2514 rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge); 2518 next_size = sizeof(*rdma);
2519 rdma = alloc_wr(next_size, user_wr->num_sge);
2515 if (!rdma) { 2520 if (!rdma) {
2516 ret = -ENOMEM; 2521 ret = -ENOMEM;
2517 goto out_put; 2522 goto out_put;
@@ -2525,7 +2530,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2525 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2530 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2526 struct ib_atomic_wr *atomic; 2531 struct ib_atomic_wr *atomic;
2527 2532
2528 atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge); 2533 next_size = sizeof(*atomic);
2534 atomic = alloc_wr(next_size, user_wr->num_sge);
2529 if (!atomic) { 2535 if (!atomic) {
2530 ret = -ENOMEM; 2536 ret = -ENOMEM;
2531 goto out_put; 2537 goto out_put;
@@ -2540,7 +2546,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2540 } else if (user_wr->opcode == IB_WR_SEND || 2546 } else if (user_wr->opcode == IB_WR_SEND ||
2541 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2547 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2542 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2548 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2543 next = alloc_wr(sizeof(*next), user_wr->num_sge); 2549 next_size = sizeof(*next);
2550 next = alloc_wr(next_size, user_wr->num_sge);
2544 if (!next) { 2551 if (!next) {
2545 ret = -ENOMEM; 2552 ret = -ENOMEM;
2546 goto out_put; 2553 goto out_put;
@@ -2572,7 +2579,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2572 2579
2573 if (next->num_sge) { 2580 if (next->num_sge) {
2574 next->sg_list = (void *) next + 2581 next->sg_list = (void *) next +
2575 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2582 ALIGN(next_size, sizeof(struct ib_sge));
2576 if (copy_from_user(next->sg_list, 2583 if (copy_from_user(next->sg_list,
2577 buf + sizeof cmd + 2584 buf + sizeof cmd +
2578 cmd.wr_count * cmd.wqe_size + 2585 cmd.wr_count * cmd.wqe_size +
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 043a60ee6836..545906dec26d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
1516 * @sg_nents: number of entries in sg 1516 * @sg_nents: number of entries in sg
1517 * @set_page: driver page assignment function pointer 1517 * @set_page: driver page assignment function pointer
1518 * 1518 *
1519 * Core service helper for drivers to covert the largest 1519 * Core service helper for drivers to convert the largest
1520 * prefix of given sg list to a page vector. The sg list 1520 * prefix of given sg list to a page vector. The sg list
1521 * prefix converted is the prefix that meet the requirements 1521 * prefix converted is the prefix that meet the requirements
1522 * of ib_map_mr_sg. 1522 * of ib_map_mr_sg.
@@ -1533,7 +1533,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
1533 u64 last_end_dma_addr = 0, last_page_addr = 0; 1533 u64 last_end_dma_addr = 0, last_page_addr = 0;
1534 unsigned int last_page_off = 0; 1534 unsigned int last_page_off = 0;
1535 u64 page_mask = ~((u64)mr->page_size - 1); 1535 u64 page_mask = ~((u64)mr->page_size - 1);
1536 int i; 1536 int i, ret;
1537 1537
1538 mr->iova = sg_dma_address(&sgl[0]); 1538 mr->iova = sg_dma_address(&sgl[0]);
1539 mr->length = 0; 1539 mr->length = 0;
@@ -1544,27 +1544,29 @@ int ib_sg_to_pages(struct ib_mr *mr,
1544 u64 end_dma_addr = dma_addr + dma_len; 1544 u64 end_dma_addr = dma_addr + dma_len;
1545 u64 page_addr = dma_addr & page_mask; 1545 u64 page_addr = dma_addr & page_mask;
1546 1546
1547 if (i && page_addr != dma_addr) { 1547 /*
1548 if (last_end_dma_addr != dma_addr) { 1548 * For the second and later elements, check whether either the
1549 /* gap */ 1549 * end of element i-1 or the start of element i is not aligned
1550 goto done; 1550 * on a page boundary.
1551 1551 */
1552 } else if (last_page_off + dma_len <= mr->page_size) { 1552 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
1553 /* chunk this fragment with the last */ 1553 /* Stop mapping if there is a gap. */
1554 mr->length += dma_len; 1554 if (last_end_dma_addr != dma_addr)
1555 last_end_dma_addr += dma_len; 1555 break;
1556 last_page_off += dma_len; 1556
1557 continue; 1557 /*
1558 } else { 1558 * Coalesce this element with the last. If it is small
1559 /* map starting from the next page */ 1559 * enough just update mr->length. Otherwise start
1560 page_addr = last_page_addr + mr->page_size; 1560 * mapping from the next page.
1561 dma_len -= mr->page_size - last_page_off; 1561 */
1562 } 1562 goto next_page;
1563 } 1563 }
1564 1564
1565 do { 1565 do {
1566 if (unlikely(set_page(mr, page_addr))) 1566 ret = set_page(mr, page_addr);
1567 goto done; 1567 if (unlikely(ret < 0))
1568 return i ? : ret;
1569next_page:
1568 page_addr += mr->page_size; 1570 page_addr += mr->page_size;
1569 } while (page_addr < end_dma_addr); 1571 } while (page_addr < end_dma_addr);
1570 1572
@@ -1574,7 +1576,6 @@ int ib_sg_to_pages(struct ib_mr *mr,
1574 last_page_off = end_dma_addr & ~page_mask; 1576 last_page_off = end_dma_addr & ~page_mask;
1575 } 1577 }
1576 1578
1577done:
1578 return i; 1579 return i;
1579} 1580}
1580EXPORT_SYMBOL(ib_sg_to_pages); 1581EXPORT_SYMBOL(ib_sg_to_pages);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f567160a4a56..97d6878f9938 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -456,7 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
456 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 456 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
457 props->max_sge = min(dev->dev->caps.max_sq_sg, 457 props->max_sge = min(dev->dev->caps.max_sq_sg,
458 dev->dev->caps.max_rq_sg); 458 dev->dev->caps.max_rq_sg);
459 props->max_sge_rd = props->max_sge; 459 props->max_sge_rd = MLX4_MAX_SGE_RD;
460 props->max_cq = dev->dev->quotas.cq; 460 props->max_cq = dev->dev->quotas.cq;
461 props->max_cqe = dev->dev->caps.max_cqes; 461 props->max_cqe = dev->dev->caps.max_cqes;
462 props->max_mr = dev->dev->quotas.mpt; 462 props->max_mr = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a2e4ca56da44..13eaaf45288f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -34,6 +34,7 @@
34#include <linux/log2.h> 34#include <linux/log2.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/vmalloc.h>
37 38
38#include <rdma/ib_cache.h> 39#include <rdma/ib_cache.h>
39#include <rdma/ib_pack.h> 40#include <rdma/ib_pack.h>
@@ -795,8 +796,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
795 if (err) 796 if (err)
796 goto err_mtt; 797 goto err_mtt;
797 798
798 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); 799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp);
799 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp); 800 if (!qp->sq.wrid)
801 qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
802 gfp, PAGE_KERNEL);
803 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp);
804 if (!qp->rq.wrid)
805 qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
806 gfp, PAGE_KERNEL);
800 if (!qp->sq.wrid || !qp->rq.wrid) { 807 if (!qp->sq.wrid || !qp->rq.wrid) {
801 err = -ENOMEM; 808 err = -ENOMEM;
802 goto err_wrid; 809 goto err_wrid;
@@ -886,8 +893,8 @@ err_wrid:
886 if (qp_has_rq(init_attr)) 893 if (qp_has_rq(init_attr))
887 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); 894 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
888 } else { 895 } else {
889 kfree(qp->sq.wrid); 896 kvfree(qp->sq.wrid);
890 kfree(qp->rq.wrid); 897 kvfree(qp->rq.wrid);
891 } 898 }
892 899
893err_mtt: 900err_mtt:
@@ -1062,8 +1069,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1062 &qp->db); 1069 &qp->db);
1063 ib_umem_release(qp->umem); 1070 ib_umem_release(qp->umem);
1064 } else { 1071 } else {
1065 kfree(qp->sq.wrid); 1072 kvfree(qp->sq.wrid);
1066 kfree(qp->rq.wrid); 1073 kvfree(qp->rq.wrid);
1067 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | 1074 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1068 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) 1075 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
1069 free_proxy_bufs(&dev->ib_dev, qp); 1076 free_proxy_bufs(&dev->ib_dev, qp);
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70e..8d133c40fa0e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -34,6 +34,7 @@
34#include <linux/mlx4/qp.h> 34#include <linux/mlx4/qp.h>
35#include <linux/mlx4/srq.h> 35#include <linux/mlx4/srq.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/vmalloc.h>
37 38
38#include "mlx4_ib.h" 39#include "mlx4_ib.h"
39#include "user.h" 40#include "user.h"
@@ -172,8 +173,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
172 173
173 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL); 174 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
174 if (!srq->wrid) { 175 if (!srq->wrid) {
175 err = -ENOMEM; 176 srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
176 goto err_mtt; 177 GFP_KERNEL, PAGE_KERNEL);
178 if (!srq->wrid) {
179 err = -ENOMEM;
180 goto err_mtt;
181 }
177 } 182 }
178 } 183 }
179 184
@@ -204,7 +209,7 @@ err_wrid:
204 if (pd->uobject) 209 if (pd->uobject)
205 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); 210 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
206 else 211 else
207 kfree(srq->wrid); 212 kvfree(srq->wrid);
208 213
209err_mtt: 214err_mtt:
210 mlx4_mtt_cleanup(dev->dev, &srq->mtt); 215 mlx4_mtt_cleanup(dev->dev, &srq->mtt);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ec8993a7b3be..6000f7aeede9 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -381,7 +381,19 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
381 } 381 }
382 } 382 }
383 } else if (ent->cur > 2 * ent->limit) { 383 } else if (ent->cur > 2 * ent->limit) {
384 if (!someone_adding(cache) && 384 /*
385 * The remove_keys() logic is performed as garbage collection
386 * task. Such task is intended to be run when no other active
387 * processes are running.
388 *
389 * The need_resched() will return TRUE if there are user tasks
390 * to be activated in near future.
391 *
392 * In such case, we don't execute remove_keys() and postpone
393 * the garbage collection work to try to run in next cycle,
394 * in order to free CPU resources to other tasks.
395 */
396 if (!need_resched() && !someone_adding(cache) &&
385 time_after(jiffies, cache->last_add + 300 * HZ)) { 397 time_after(jiffies, cache->last_add + 300 * HZ)) {
386 remove_keys(dev, i, 1); 398 remove_keys(dev, i, 1);
387 if (ent->cur > ent->limit) 399 if (ent->cur > ent->limit)
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 5e27f76805e2..4c7c3c84a741 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -292,7 +292,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
292 qib_dev_porterr(ppd->dd, ppd->port, 292 qib_dev_porterr(ppd->dd, ppd->port,
293 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]); 293 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
294 294
295 if ((peek[2] & 2) == 0) { 295 if ((peek[2] & 4) == 0) {
296 /* 296 /*
297 * If cable is paged, rather than "flat memory", we need to 297 * If cable is paged, rather than "flat memory", we need to
298 * set the page to zero, Even if it already appears to be zero. 298 * set the page to zero, Even if it already appears to be zero.
@@ -538,7 +538,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
538 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n", 538 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
539 QSFP_DATE_LEN, cd.date); 539 QSFP_DATE_LEN, cd.date);
540 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n", 540 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
541 QSFP_LOT_LEN, cd.date); 541 QSFP_LOT_LEN, cd.lot);
542 542
543 while (bidx < QSFP_DEFAULT_HDR_CNT) { 543 while (bidx < QSFP_DEFAULT_HDR_CNT) {
544 int iidx; 544 int iidx;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 2baf5ad251ed..bc803f33d5f6 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -329,9 +329,9 @@ struct qib_sge {
329struct qib_mr { 329struct qib_mr {
330 struct ib_mr ibmr; 330 struct ib_mr ibmr;
331 struct ib_umem *umem; 331 struct ib_umem *umem;
332 struct qib_mregion mr; /* must be last */
333 u64 *pages; 332 u64 *pages;
334 u32 npages; 333 u32 npages;
334 struct qib_mregion mr; /* must be last */
335}; 335};
336 336
337/* 337/*
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a93070210109..42f4da620f2e 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1293,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1293 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1293 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1294 sector_t sector_off = mr_status.sig_err.sig_err_offset; 1294 sector_t sector_off = mr_status.sig_err.sig_err_offset;
1295 1295
1296 do_div(sector_off, sector_size + 8); 1296 sector_div(sector_off, sector_size + 8);
1297 *sector = scsi_get_lba(iser_task->sc) + sector_off; 1297 *sector = scsi_get_lba(iser_task->sc) + sector_off;
1298 1298
1299 pr_err("PI error found type %d at sector %llx " 1299 pr_err("PI error found type %d at sector %llx "
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dfbbbb28090b..8a51c3b5d657 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -157,16 +157,9 @@ isert_create_qp(struct isert_conn *isert_conn,
157 attr.recv_cq = comp->cq; 157 attr.recv_cq = comp->cq;
158 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 158 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
159 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 159 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
160 /* 160 attr.cap.max_send_sge = device->dev_attr.max_sge;
161 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 161 isert_conn->max_sge = min(device->dev_attr.max_sge,
162 * work-around for RDMA_READs with ConnectX-2. 162 device->dev_attr.max_sge_rd);
163 *
164 * Also, still make sure to have at least two SGEs for
165 * outgoing control PDU responses.
166 */
167 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
168 isert_conn->max_sge = attr.cap.max_send_sge;
169
170 attr.cap.max_recv_sge = 1; 163 attr.cap.max_recv_sge = 1;
171 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 164 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
172 attr.qp_type = IB_QPT_RC; 165 attr.qp_type = IB_QPT_RC;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9909022dc6c3..3db9a659719b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -488,7 +488,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
488 struct ib_qp *qp; 488 struct ib_qp *qp;
489 struct ib_fmr_pool *fmr_pool = NULL; 489 struct ib_fmr_pool *fmr_pool = NULL;
490 struct srp_fr_pool *fr_pool = NULL; 490 struct srp_fr_pool *fr_pool = NULL;
491 const int m = 1 + dev->use_fast_reg; 491 const int m = dev->use_fast_reg ? 3 : 1;
492 struct ib_cq_init_attr cq_attr = {}; 492 struct ib_cq_init_attr cq_attr = {};
493 int ret; 493 int ret;
494 494
@@ -994,16 +994,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
994 994
995 ret = srp_lookup_path(ch); 995 ret = srp_lookup_path(ch);
996 if (ret) 996 if (ret)
997 return ret; 997 goto out;
998 998
999 while (1) { 999 while (1) {
1000 init_completion(&ch->done); 1000 init_completion(&ch->done);
1001 ret = srp_send_req(ch, multich); 1001 ret = srp_send_req(ch, multich);
1002 if (ret) 1002 if (ret)
1003 return ret; 1003 goto out;
1004 ret = wait_for_completion_interruptible(&ch->done); 1004 ret = wait_for_completion_interruptible(&ch->done);
1005 if (ret < 0) 1005 if (ret < 0)
1006 return ret; 1006 goto out;
1007 1007
1008 /* 1008 /*
1009 * The CM event handling code will set status to 1009 * The CM event handling code will set status to
@@ -1011,15 +1011,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
1011 * back, or SRP_DLID_REDIRECT if we get a lid/qp 1011 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1012 * redirect REJ back. 1012 * redirect REJ back.
1013 */ 1013 */
1014 switch (ch->status) { 1014 ret = ch->status;
1015 switch (ret) {
1015 case 0: 1016 case 0:
1016 ch->connected = true; 1017 ch->connected = true;
1017 return 0; 1018 goto out;
1018 1019
1019 case SRP_PORT_REDIRECT: 1020 case SRP_PORT_REDIRECT:
1020 ret = srp_lookup_path(ch); 1021 ret = srp_lookup_path(ch);
1021 if (ret) 1022 if (ret)
1022 return ret; 1023 goto out;
1023 break; 1024 break;
1024 1025
1025 case SRP_DLID_REDIRECT: 1026 case SRP_DLID_REDIRECT:
@@ -1028,13 +1029,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
1028 case SRP_STALE_CONN: 1029 case SRP_STALE_CONN:
1029 shost_printk(KERN_ERR, target->scsi_host, PFX 1030 shost_printk(KERN_ERR, target->scsi_host, PFX
1030 "giving up on stale connection\n"); 1031 "giving up on stale connection\n");
1031 ch->status = -ECONNRESET; 1032 ret = -ECONNRESET;
1032 return ch->status; 1033 goto out;
1033 1034
1034 default: 1035 default:
1035 return ch->status; 1036 goto out;
1036 } 1037 }
1037 } 1038 }
1039
1040out:
1041 return ret <= 0 ? ret : -ENODEV;
1038} 1042}
1039 1043
1040static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey) 1044static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
@@ -1309,7 +1313,7 @@ reset_state:
1309} 1313}
1310 1314
1311static int srp_map_finish_fr(struct srp_map_state *state, 1315static int srp_map_finish_fr(struct srp_map_state *state,
1312 struct srp_rdma_ch *ch) 1316 struct srp_rdma_ch *ch, int sg_nents)
1313{ 1317{
1314 struct srp_target_port *target = ch->target; 1318 struct srp_target_port *target = ch->target;
1315 struct srp_device *dev = target->srp_host->srp_dev; 1319 struct srp_device *dev = target->srp_host->srp_dev;
@@ -1324,10 +1328,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1324 1328
1325 WARN_ON_ONCE(!dev->use_fast_reg); 1329 WARN_ON_ONCE(!dev->use_fast_reg);
1326 1330
1327 if (state->sg_nents == 0) 1331 if (sg_nents == 0)
1328 return 0; 1332 return 0;
1329 1333
1330 if (state->sg_nents == 1 && target->global_mr) { 1334 if (sg_nents == 1 && target->global_mr) {
1331 srp_map_desc(state, sg_dma_address(state->sg), 1335 srp_map_desc(state, sg_dma_address(state->sg),
1332 sg_dma_len(state->sg), 1336 sg_dma_len(state->sg),
1333 target->global_mr->rkey); 1337 target->global_mr->rkey);
@@ -1341,8 +1345,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1341 rkey = ib_inc_rkey(desc->mr->rkey); 1345 rkey = ib_inc_rkey(desc->mr->rkey);
1342 ib_update_fast_reg_key(desc->mr, rkey); 1346 ib_update_fast_reg_key(desc->mr, rkey);
1343 1347
1344 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents, 1348 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
1345 dev->mr_page_size);
1346 if (unlikely(n < 0)) 1349 if (unlikely(n < 0))
1347 return n; 1350 return n;
1348 1351
@@ -1448,16 +1451,15 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1448 state->fr.next = req->fr_list; 1451 state->fr.next = req->fr_list;
1449 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt; 1452 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1450 state->sg = scat; 1453 state->sg = scat;
1451 state->sg_nents = scsi_sg_count(req->scmnd);
1452 1454
1453 while (state->sg_nents) { 1455 while (count) {
1454 int i, n; 1456 int i, n;
1455 1457
1456 n = srp_map_finish_fr(state, ch); 1458 n = srp_map_finish_fr(state, ch, count);
1457 if (unlikely(n < 0)) 1459 if (unlikely(n < 0))
1458 return n; 1460 return n;
1459 1461
1460 state->sg_nents -= n; 1462 count -= n;
1461 for (i = 0; i < n; i++) 1463 for (i = 0; i < n; i++)
1462 state->sg = sg_next(state->sg); 1464 state->sg = sg_next(state->sg);
1463 } 1465 }
@@ -1517,10 +1519,12 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1517 1519
1518 if (dev->use_fast_reg) { 1520 if (dev->use_fast_reg) {
1519 state.sg = idb_sg; 1521 state.sg = idb_sg;
1520 state.sg_nents = 1;
1521 sg_set_buf(idb_sg, req->indirect_desc, idb_len); 1522 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1522 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ 1523 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1523 ret = srp_map_finish_fr(&state, ch); 1524#ifdef CONFIG_NEED_SG_DMA_LENGTH
1525 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1526#endif
1527 ret = srp_map_finish_fr(&state, ch, 1);
1524 if (ret < 0) 1528 if (ret < 0)
1525 return ret; 1529 return ret;
1526 } else if (dev->use_fmr) { 1530 } else if (dev->use_fmr) {
@@ -1655,7 +1659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1655 return ret; 1659 return ret;
1656 req->nmdesc++; 1660 req->nmdesc++;
1657 } else { 1661 } else {
1658 idb_rkey = target->global_mr->rkey; 1662 idb_rkey = cpu_to_be32(target->global_mr->rkey);
1659 } 1663 }
1660 1664
1661 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1665 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 87a2a919dc43..f6af531f9f32 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -300,10 +300,7 @@ struct srp_map_state {
300 dma_addr_t base_dma_addr; 300 dma_addr_t base_dma_addr;
301 u32 dma_len; 301 u32 dma_len;
302 u32 total_len; 302 u32 total_len;
303 union { 303 unsigned int npages;
304 unsigned int npages;
305 int sg_nents;
306 };
307 unsigned int nmdesc; 304 unsigned int nmdesc;
308 unsigned int ndesc; 305 unsigned int ndesc;
309}; 306};
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 932d07307454..da326090c2b0 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -592,6 +592,7 @@ static void db9_attach(struct parport *pp)
592 return; 592 return;
593 } 593 }
594 594
595 memset(&db9_parport_cb, 0, sizeof(db9_parport_cb));
595 db9_parport_cb.flags = PARPORT_FLAG_EXCL; 596 db9_parport_cb.flags = PARPORT_FLAG_EXCL;
596 597
597 pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx); 598 pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx);
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 5a672dcac0d8..eae14d512353 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -951,6 +951,7 @@ static void gc_attach(struct parport *pp)
951 pads = gc_cfg[port_idx].args + 1; 951 pads = gc_cfg[port_idx].args + 1;
952 n_pads = gc_cfg[port_idx].nargs - 1; 952 n_pads = gc_cfg[port_idx].nargs - 1;
953 953
954 memset(&gc_parport_cb, 0, sizeof(gc_parport_cb));
954 gc_parport_cb.flags = PARPORT_FLAG_EXCL; 955 gc_parport_cb.flags = PARPORT_FLAG_EXCL;
955 956
956 pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb, 957 pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb,
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 9f5bca26bd2f..77f575dd0901 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -181,6 +181,7 @@ static void tgfx_attach(struct parport *pp)
181 n_buttons = tgfx_cfg[port_idx].args + 1; 181 n_buttons = tgfx_cfg[port_idx].args + 1;
182 n_devs = tgfx_cfg[port_idx].nargs - 1; 182 n_devs = tgfx_cfg[port_idx].nargs - 1;
183 183
184 memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb));
184 tgfx_parport_cb.flags = PARPORT_FLAG_EXCL; 185 tgfx_parport_cb.flags = PARPORT_FLAG_EXCL;
185 186
186 pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb, 187 pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb,
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 9c07fe911075..70a893a17467 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -218,6 +218,7 @@ static void walkera0701_attach(struct parport *pp)
218 218
219 w->parport = pp; 219 w->parport = pp;
220 220
221 memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb));
221 walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL; 222 walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL;
222 walkera0701_parport_cb.irq_func = walkera0701_irq_handler; 223 walkera0701_parport_cb.irq_func = walkera0701_irq_handler;
223 walkera0701_parport_cb.private = w; 224 walkera0701_parport_cb.private = w;
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4bf678541496..d5994a745ffa 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -97,8 +97,7 @@ static void arizona_haptics_work(struct work_struct *work)
97 97
98 ret = regmap_update_bits(arizona->regmap, 98 ret = regmap_update_bits(arizona->regmap,
99 ARIZONA_HAPTICS_CONTROL_1, 99 ARIZONA_HAPTICS_CONTROL_1,
100 ARIZONA_HAP_CTRL_MASK, 100 ARIZONA_HAP_CTRL_MASK, 0);
101 1 << ARIZONA_HAP_CTRL_SHIFT);
102 if (ret != 0) { 101 if (ret != 0) {
103 dev_err(arizona->dev, "Failed to stop haptics: %d\n", 102 dev_err(arizona->dev, "Failed to stop haptics: %d\n",
104 ret); 103 ret);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 5e1665bbaa0b..2f589857a039 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -41,6 +41,7 @@
41 41
42#define DRIVER_NAME "elan_i2c" 42#define DRIVER_NAME "elan_i2c"
43#define ELAN_DRIVER_VERSION "1.6.1" 43#define ELAN_DRIVER_VERSION "1.6.1"
44#define ELAN_VENDOR_ID 0x04f3
44#define ETP_MAX_PRESSURE 255 45#define ETP_MAX_PRESSURE 255
45#define ETP_FWIDTH_REDUCE 90 46#define ETP_FWIDTH_REDUCE 90
46#define ETP_FINGER_WIDTH 15 47#define ETP_FINGER_WIDTH 15
@@ -914,6 +915,8 @@ static int elan_setup_input_device(struct elan_tp_data *data)
914 915
915 input->name = "Elan Touchpad"; 916 input->name = "Elan Touchpad";
916 input->id.bustype = BUS_I2C; 917 input->id.bustype = BUS_I2C;
918 input->id.vendor = ELAN_VENDOR_ID;
919 input->id.product = data->product_id;
917 input_set_drvdata(input, data); 920 input_set_drvdata(input, data);
918 921
919 error = input_mt_init_slots(input, ETP_MAX_FINGERS, 922 error = input_mt_init_slots(input, ETP_MAX_FINGERS,
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 92c31b8f8fb4..1edfac78d4ac 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -145,6 +145,7 @@ static int parkbd_getport(struct parport *pp)
145{ 145{
146 struct pardev_cb parkbd_parport_cb; 146 struct pardev_cb parkbd_parport_cb;
147 147
148 memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb));
148 parkbd_parport_cb.irq_func = parkbd_interrupt; 149 parkbd_parport_cb.irq_func = parkbd_interrupt;
149 parkbd_parport_cb.flags = PARPORT_FLAG_EXCL; 150 parkbd_parport_cb.flags = PARPORT_FLAG_EXCL;
150 151
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index e7f966da6efa..78ca44840d60 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1819,6 +1819,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
1819 input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); 1819 input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
1820 input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); 1820 input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
1821 1821
1822 /* Verify that a device really has an endpoint */
1823 if (intf->altsetting[0].desc.bNumEndpoints < 1) {
1824 dev_err(&intf->dev,
1825 "interface has %d endpoints, but must have minimum 1\n",
1826 intf->altsetting[0].desc.bNumEndpoints);
1827 err = -EINVAL;
1828 goto fail3;
1829 }
1822 endpoint = &intf->altsetting[0].endpoint[0].desc; 1830 endpoint = &intf->altsetting[0].endpoint[0].desc;
1823 1831
1824 /* Go set up our URB, which is called when the tablet receives 1832 /* Go set up our URB, which is called when the tablet receives
@@ -1861,6 +1869,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
1861 if (i == ARRAY_SIZE(speeds)) { 1869 if (i == ARRAY_SIZE(speeds)) {
1862 dev_info(&intf->dev, 1870 dev_info(&intf->dev,
1863 "Aiptek tried all speeds, no sane response\n"); 1871 "Aiptek tried all speeds, no sane response\n");
1872 err = -EINVAL;
1864 goto fail3; 1873 goto fail3;
1865 } 1874 }
1866 1875
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index c5622058c22b..2d5794ec338b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2487,6 +2487,31 @@ static struct mxt_acpi_platform_data samus_platform_data[] = {
2487 { } 2487 { }
2488}; 2488};
2489 2489
2490static unsigned int chromebook_tp_buttons[] = {
2491 KEY_RESERVED,
2492 KEY_RESERVED,
2493 KEY_RESERVED,
2494 KEY_RESERVED,
2495 KEY_RESERVED,
2496 BTN_LEFT
2497};
2498
2499static struct mxt_acpi_platform_data chromebook_platform_data[] = {
2500 {
2501 /* Touchpad */
2502 .hid = "ATML0000",
2503 .pdata = {
2504 .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons),
2505 .t19_keymap = chromebook_tp_buttons,
2506 },
2507 },
2508 {
2509 /* Touchscreen */
2510 .hid = "ATML0001",
2511 },
2512 { }
2513};
2514
2490static const struct dmi_system_id mxt_dmi_table[] = { 2515static const struct dmi_system_id mxt_dmi_table[] = {
2491 { 2516 {
2492 /* 2015 Google Pixel */ 2517 /* 2015 Google Pixel */
@@ -2497,6 +2522,14 @@ static const struct dmi_system_id mxt_dmi_table[] = {
2497 }, 2522 },
2498 .driver_data = samus_platform_data, 2523 .driver_data = samus_platform_data,
2499 }, 2524 },
2525 {
2526 /* Other Google Chromebooks */
2527 .ident = "Chromebook",
2528 .matches = {
2529 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
2530 },
2531 .driver_data = chromebook_platform_data,
2532 },
2500 { } 2533 { }
2501}; 2534};
2502 2535
@@ -2701,6 +2734,7 @@ static const struct i2c_device_id mxt_id[] = {
2701 { "qt602240_ts", 0 }, 2734 { "qt602240_ts", 0 },
2702 { "atmel_mxt_ts", 0 }, 2735 { "atmel_mxt_ts", 0 },
2703 { "atmel_mxt_tp", 0 }, 2736 { "atmel_mxt_tp", 0 },
2737 { "maxtouch", 0 },
2704 { "mXT224", 0 }, 2738 { "mXT224", 0 },
2705 { } 2739 { }
2706}; 2740};
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 17cc20ef4923..ac09855fa435 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1316,7 +1316,13 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
1316 1316
1317 disable_irq(client->irq); 1317 disable_irq(client->irq);
1318 1318
1319 if (device_may_wakeup(dev) || ts->keep_power_in_suspend) { 1319 if (device_may_wakeup(dev)) {
1320 /*
1321 * The device will automatically enter idle mode
1322 * that has reduced power consumption.
1323 */
1324 ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
1325 } else if (ts->keep_power_in_suspend) {
1320 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { 1326 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
1321 error = elants_i2c_send(client, set_sleep_cmd, 1327 error = elants_i2c_send(client, set_sleep_cmd,
1322 sizeof(set_sleep_cmd)); 1328 sizeof(set_sleep_cmd));
@@ -1326,10 +1332,6 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
1326 dev_err(&client->dev, 1332 dev_err(&client->dev,
1327 "suspend command failed: %d\n", error); 1333 "suspend command failed: %d\n", error);
1328 } 1334 }
1329
1330 if (device_may_wakeup(dev))
1331 ts->wake_irq_enabled =
1332 (enable_irq_wake(client->irq) == 0);
1333 } else { 1335 } else {
1334 elants_i2c_power_off(ts); 1336 elants_i2c_power_off(ts);
1335 } 1337 }
@@ -1345,10 +1347,11 @@ static int __maybe_unused elants_i2c_resume(struct device *dev)
1345 int retry_cnt; 1347 int retry_cnt;
1346 int error; 1348 int error;
1347 1349
1348 if (device_may_wakeup(dev) && ts->wake_irq_enabled) 1350 if (device_may_wakeup(dev)) {
1349 disable_irq_wake(client->irq); 1351 if (ts->wake_irq_enabled)
1350 1352 disable_irq_wake(client->irq);
1351 if (ts->keep_power_in_suspend) { 1353 elants_i2c_sw_reset(client);
1354 } else if (ts->keep_power_in_suspend) {
1352 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { 1355 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
1353 error = elants_i2c_send(client, set_active_cmd, 1356 error = elants_i2c_send(client, set_active_cmd,
1354 sizeof(set_active_cmd)); 1357 sizeof(set_active_cmd));
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d21d4edf7236..7caf2fa237f2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault)
494 } 494 }
495} 495}
496 496
497static bool access_error(struct vm_area_struct *vma, struct fault *fault)
498{
499 unsigned long requested = 0;
500
501 if (fault->flags & PPR_FAULT_EXEC)
502 requested |= VM_EXEC;
503
504 if (fault->flags & PPR_FAULT_READ)
505 requested |= VM_READ;
506
507 if (fault->flags & PPR_FAULT_WRITE)
508 requested |= VM_WRITE;
509
510 return (requested & ~vma->vm_flags) != 0;
511}
512
497static void do_fault(struct work_struct *work) 513static void do_fault(struct work_struct *work)
498{ 514{
499 struct fault *fault = container_of(work, struct fault, work); 515 struct fault *fault = container_of(work, struct fault, work);
@@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work)
516 goto out; 532 goto out;
517 } 533 }
518 534
519 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) { 535 /* Check if we have the right permissions on the vma */
520 /* handle_mm_fault would BUG_ON() */ 536 if (access_error(vma, fault)) {
521 up_read(&mm->mmap_sem); 537 up_read(&mm->mmap_sem);
522 handle_fault_error(fault); 538 handle_fault_error(fault);
523 goto out; 539 goto out;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f1042daef9ad..ac7387686ddc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2159 sg_res = aligned_nrpages(sg->offset, sg->length); 2159 sg_res = aligned_nrpages(sg->offset, sg->length);
2160 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; 2160 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2161 sg->dma_length = sg->length; 2161 sg->dma_length = sg->length;
2162 pteval = (sg_phys(sg) & PAGE_MASK) | prot; 2162 pteval = page_to_phys(sg_page(sg)) | prot;
2163 phys_pfn = pteval >> VTD_PAGE_SHIFT; 2163 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2164 } 2164 }
2165 2165
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
3704 3704
3705 for_each_sg(sglist, sg, nelems, i) { 3705 for_each_sg(sglist, sg, nelems, i) {
3706 BUG_ON(!sg_page(sg)); 3706 BUG_ON(!sg_page(sg));
3707 sg->dma_address = sg_phys(sg); 3707 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3708 sg->dma_length = sg->length; 3708 sg->dma_length = sg->length;
3709 } 3709 }
3710 return nelems; 3710 return nelems;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index c69e3f9ec958..50464833d0b8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -484,6 +484,23 @@ struct page_req_dsc {
484}; 484};
485 485
486#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) 486#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
487
488static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
489{
490 unsigned long requested = 0;
491
492 if (req->exe_req)
493 requested |= VM_EXEC;
494
495 if (req->rd_req)
496 requested |= VM_READ;
497
498 if (req->wr_req)
499 requested |= VM_WRITE;
500
501 return (requested & ~vma->vm_flags) != 0;
502}
503
487static irqreturn_t prq_event_thread(int irq, void *d) 504static irqreturn_t prq_event_thread(int irq, void *d)
488{ 505{
489 struct intel_iommu *iommu = d; 506 struct intel_iommu *iommu = d;
@@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
539 if (!vma || address < vma->vm_start) 556 if (!vma || address < vma->vm_start)
540 goto invalid; 557 goto invalid;
541 558
559 if (access_error(vma, req))
560 goto invalid;
561
542 ret = handle_mm_fault(svm->mm, vma, address, 562 ret = handle_mm_fault(svm->mm, vma, address,
543 req->wr_req ? FAULT_FLAG_WRITE : 0); 563 req->wr_req ? FAULT_FLAG_WRITE : 0);
544 if (ret & VM_FAULT_ERROR) 564 if (ret & VM_FAULT_ERROR)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index abae363c7b9b..0e3b0092ec92 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1430 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); 1430 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
1431 1431
1432 for_each_sg(sg, s, nents, i) { 1432 for_each_sg(sg, s, nents, i) {
1433 phys_addr_t phys = sg_phys(s); 1433 phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
1434 1434
1435 /* 1435 /*
1436 * We are mapping on IOMMU page boundaries, so offset within 1436 * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 598ab3f0e0ac..cadf104e3074 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -210,7 +210,12 @@ int __init fpga_irq_of_init(struct device_node *node,
210 parent_irq = -1; 210 parent_irq = -1;
211 } 211 }
212 212
213#ifdef CONFIG_ARCH_VERSATILE
214 fpga_irq_init(base, node->name, IRQ_SIC_START, parent_irq, valid_mask,
215 node);
216#else
213 fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); 217 fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
218#endif
214 219
215 writel(clear_mask, base + IRQ_ENABLE_CLEAR); 220 writel(clear_mask, base + IRQ_ENABLE_CLEAR);
216 writel(clear_mask, base + FIQ_ENABLE_CLEAR); 221 writel(clear_mask, base + FIQ_ENABLE_CLEAR);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 375be509e95f..2a506fe0c8a4 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs)
67 struct sk_buff *skb = bcs->tx_skb; 67 struct sk_buff *skb = bcs->tx_skb;
68 int sent = -EOPNOTSUPP; 68 int sent = -EOPNOTSUPP;
69 69
70 if (!tty || !tty->driver || !skb) 70 WARN_ON(!tty || !tty->ops || !skb);
71 return -EINVAL;
72 71
73 if (!skb->len) { 72 if (!skb->len) {
74 dev_kfree_skb_any(skb); 73 dev_kfree_skb_any(skb);
@@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs)
109 unsigned long flags; 108 unsigned long flags;
110 int sent = 0; 109 int sent = 0;
111 110
112 if (!tty || !tty->driver) 111 WARN_ON(!tty || !tty->ops);
113 return -EFAULT;
114 112
115 cb = cs->cmdbuf; 113 cb = cs->cmdbuf;
116 if (!cb) 114 if (!cb)
@@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs)
370 tasklet_kill(&cs->write_tasklet); 368 tasklet_kill(&cs->write_tasklet);
371 if (!cs->hw.ser) 369 if (!cs->hw.ser)
372 return; 370 return;
373 dev_set_drvdata(&cs->hw.ser->dev.dev, NULL);
374 platform_device_unregister(&cs->hw.ser->dev); 371 platform_device_unregister(&cs->hw.ser->dev);
375 kfree(cs->hw.ser);
376 cs->hw.ser = NULL;
377} 372}
378 373
379static void gigaset_device_release(struct device *dev) 374static void gigaset_device_release(struct device *dev)
380{ 375{
381 struct platform_device *pdev = to_platform_device(dev); 376 struct cardstate *cs = dev_get_drvdata(dev);
382 377
383 /* adapted from platform_device_release() in drivers/base/platform.c */ 378 if (!cs)
384 kfree(dev->platform_data); 379 return;
385 kfree(pdev->resource); 380 dev_set_drvdata(dev, NULL);
381 kfree(cs->hw.ser);
382 cs->hw.ser = NULL;
386} 383}
387 384
388/* 385/*
@@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
432 struct tty_struct *tty = cs->hw.ser->tty; 429 struct tty_struct *tty = cs->hw.ser->tty;
433 unsigned int set, clear; 430 unsigned int set, clear;
434 431
435 if (!tty || !tty->driver || !tty->ops->tiocmset) 432 WARN_ON(!tty || !tty->ops);
433 /* tiocmset is an optional tty driver method */
434 if (!tty->ops->tiocmset)
436 return -EINVAL; 435 return -EINVAL;
437 set = new_state & ~old_state; 436 set = new_state & ~old_state;
438 clear = old_state & ~new_state; 437 clear = old_state & ~new_state;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index a77eea594b69..cb428b9ee441 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
1170 1170
1171 if (ipac->type & IPAC_TYPE_IPACX) { 1171 if (ipac->type & IPAC_TYPE_IPACX) {
1172 ista = ReadIPAC(ipac, ISACX_ISTA); 1172 ista = ReadIPAC(ipac, ISACX_ISTA);
1173 while (ista && cnt--) { 1173 while (ista && --cnt) {
1174 pr_debug("%s: ISTA %02x\n", ipac->name, ista); 1174 pr_debug("%s: ISTA %02x\n", ipac->name, ista);
1175 if (ista & IPACX__ICA) 1175 if (ista & IPACX__ICA)
1176 ipac_irq(&ipac->hscx[0], ista); 1176 ipac_irq(&ipac->hscx[0], ista);
@@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
1182 } 1182 }
1183 } else if (ipac->type & IPAC_TYPE_IPAC) { 1183 } else if (ipac->type & IPAC_TYPE_IPAC) {
1184 ista = ReadIPAC(ipac, IPAC_ISTA); 1184 ista = ReadIPAC(ipac, IPAC_ISTA);
1185 while (ista && cnt--) { 1185 while (ista && --cnt) {
1186 pr_debug("%s: ISTA %02x\n", ipac->name, ista); 1186 pr_debug("%s: ISTA %02x\n", ipac->name, ista);
1187 if (ista & (IPAC__ICD | IPAC__EXD)) { 1187 if (ista & (IPAC__ICD | IPAC__EXD)) {
1188 istad = ReadISAC(isac, ISAC_ISTA); 1188 istad = ReadISAC(isac, ISAC_ISTA);
@@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
1200 ista = ReadIPAC(ipac, IPAC_ISTA); 1200 ista = ReadIPAC(ipac, IPAC_ISTA);
1201 } 1201 }
1202 } else if (ipac->type & IPAC_TYPE_HSCX) { 1202 } else if (ipac->type & IPAC_TYPE_HSCX) {
1203 while (cnt) { 1203 while (--cnt) {
1204 ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off); 1204 ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off);
1205 pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista); 1205 pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista);
1206 if (ista) 1206 if (ista)
@@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
1211 mISDNisac_irq(isac, istad); 1211 mISDNisac_irq(isac, istad);
1212 if (0 == (ista | istad)) 1212 if (0 == (ista | istad))
1213 break; 1213 break;
1214 cnt--;
1215 } 1214 }
1216 } 1215 }
1217 if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */ 1216 if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index a16bf56d3f28..85a339030e4b 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -18,6 +18,7 @@ if NVM
18 18
19config NVM_DEBUG 19config NVM_DEBUG
20 bool "Open-Channel SSD debugging support" 20 bool "Open-Channel SSD debugging support"
21 default n
21 ---help--- 22 ---help---
22 Exposes a debug management interface to create/remove targets at: 23 Exposes a debug management interface to create/remove targets at:
23 24
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 86ce887b2ed6..8f41b245cd55 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(nvm_unregister_target);
74void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags, 74void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
75 dma_addr_t *dma_handler) 75 dma_addr_t *dma_handler)
76{ 76{
77 return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags, 77 return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
78 dma_handler); 78 dma_handler);
79} 79}
80EXPORT_SYMBOL(nvm_dev_dma_alloc); 80EXPORT_SYMBOL(nvm_dev_dma_alloc);
@@ -97,15 +97,47 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name)
97 return NULL; 97 return NULL;
98} 98}
99 99
100struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
101{
102 struct nvmm_type *mt;
103 int ret;
104
105 lockdep_assert_held(&nvm_lock);
106
107 list_for_each_entry(mt, &nvm_mgrs, list) {
108 ret = mt->register_mgr(dev);
109 if (ret < 0) {
110 pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
111 ret, dev->name);
112 return NULL; /* initialization failed */
113 } else if (ret > 0)
114 return mt;
115 }
116
117 return NULL;
118}
119
100int nvm_register_mgr(struct nvmm_type *mt) 120int nvm_register_mgr(struct nvmm_type *mt)
101{ 121{
122 struct nvm_dev *dev;
102 int ret = 0; 123 int ret = 0;
103 124
104 down_write(&nvm_lock); 125 down_write(&nvm_lock);
105 if (nvm_find_mgr_type(mt->name)) 126 if (nvm_find_mgr_type(mt->name)) {
106 ret = -EEXIST; 127 ret = -EEXIST;
107 else 128 goto finish;
129 } else {
108 list_add(&mt->list, &nvm_mgrs); 130 list_add(&mt->list, &nvm_mgrs);
131 }
132
133 /* try to register media mgr if any device have none configured */
134 list_for_each_entry(dev, &nvm_devices, devices) {
135 if (dev->mt)
136 continue;
137
138 dev->mt = nvm_init_mgr(dev);
139 }
140finish:
109 up_write(&nvm_lock); 141 up_write(&nvm_lock);
110 142
111 return ret; 143 return ret;
@@ -123,26 +155,6 @@ void nvm_unregister_mgr(struct nvmm_type *mt)
123} 155}
124EXPORT_SYMBOL(nvm_unregister_mgr); 156EXPORT_SYMBOL(nvm_unregister_mgr);
125 157
126/* register with device with a supported manager */
127static int register_mgr(struct nvm_dev *dev)
128{
129 struct nvmm_type *mt;
130 int ret = 0;
131
132 list_for_each_entry(mt, &nvm_mgrs, list) {
133 ret = mt->register_mgr(dev);
134 if (ret > 0) {
135 dev->mt = mt;
136 break; /* successfully initialized */
137 }
138 }
139
140 if (!ret)
141 pr_info("nvm: no compatible nvm manager found.\n");
142
143 return ret;
144}
145
146static struct nvm_dev *nvm_find_nvm_dev(const char *name) 158static struct nvm_dev *nvm_find_nvm_dev(const char *name)
147{ 159{
148 struct nvm_dev *dev; 160 struct nvm_dev *dev;
@@ -246,7 +258,7 @@ static int nvm_init(struct nvm_dev *dev)
246 if (!dev->q || !dev->ops) 258 if (!dev->q || !dev->ops)
247 return ret; 259 return ret;
248 260
249 if (dev->ops->identity(dev->q, &dev->identity)) { 261 if (dev->ops->identity(dev, &dev->identity)) {
250 pr_err("nvm: device could not be identified\n"); 262 pr_err("nvm: device could not be identified\n");
251 goto err; 263 goto err;
252 } 264 }
@@ -271,14 +283,6 @@ static int nvm_init(struct nvm_dev *dev)
271 goto err; 283 goto err;
272 } 284 }
273 285
274 down_write(&nvm_lock);
275 ret = register_mgr(dev);
276 up_write(&nvm_lock);
277 if (ret < 0)
278 goto err;
279 if (!ret)
280 return 0;
281
282 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n", 286 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
283 dev->name, dev->sec_per_pg, dev->nr_planes, 287 dev->name, dev->sec_per_pg, dev->nr_planes,
284 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns, 288 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
@@ -326,8 +330,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
326 } 330 }
327 331
328 if (dev->ops->max_phys_sect > 1) { 332 if (dev->ops->max_phys_sect > 1) {
329 dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, 333 dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
330 "ppalist");
331 if (!dev->ppalist_pool) { 334 if (!dev->ppalist_pool) {
332 pr_err("nvm: could not create ppa pool\n"); 335 pr_err("nvm: could not create ppa pool\n");
333 ret = -ENOMEM; 336 ret = -ENOMEM;
@@ -335,7 +338,9 @@ int nvm_register(struct request_queue *q, char *disk_name,
335 } 338 }
336 } 339 }
337 340
341 /* register device with a supported media manager */
338 down_write(&nvm_lock); 342 down_write(&nvm_lock);
343 dev->mt = nvm_init_mgr(dev);
339 list_add(&dev->devices, &nvm_devices); 344 list_add(&dev->devices, &nvm_devices);
340 up_write(&nvm_lock); 345 up_write(&nvm_lock);
341 346
@@ -380,19 +385,13 @@ static int nvm_create_target(struct nvm_dev *dev,
380 struct nvm_tgt_type *tt; 385 struct nvm_tgt_type *tt;
381 struct nvm_target *t; 386 struct nvm_target *t;
382 void *targetdata; 387 void *targetdata;
383 int ret = 0;
384 388
385 down_write(&nvm_lock);
386 if (!dev->mt) { 389 if (!dev->mt) {
387 ret = register_mgr(dev); 390 pr_info("nvm: device has no media manager registered.\n");
388 if (!ret) 391 return -ENODEV;
389 ret = -ENODEV;
390 if (ret < 0) {
391 up_write(&nvm_lock);
392 return ret;
393 }
394 } 392 }
395 393
394 down_write(&nvm_lock);
396 tt = nvm_find_target_type(create->tgttype); 395 tt = nvm_find_target_type(create->tgttype);
397 if (!tt) { 396 if (!tt) {
398 pr_err("nvm: target type %s not found\n", create->tgttype); 397 pr_err("nvm: target type %s not found\n", create->tgttype);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 35dde84b71e9..f434e89e1c7a 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -195,7 +195,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
195 } 195 }
196 196
197 if (dev->ops->get_l2p_tbl) { 197 if (dev->ops->get_l2p_tbl) {
198 ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages, 198 ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
199 gennvm_block_map, dev); 199 gennvm_block_map, dev);
200 if (ret) { 200 if (ret) {
201 pr_err("gennvm: could not read L2P table.\n"); 201 pr_err("gennvm: could not read L2P table.\n");
@@ -219,6 +219,9 @@ static int gennvm_register(struct nvm_dev *dev)
219 struct gen_nvm *gn; 219 struct gen_nvm *gn;
220 int ret; 220 int ret;
221 221
222 if (!try_module_get(THIS_MODULE))
223 return -ENODEV;
224
222 gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL); 225 gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
223 if (!gn) 226 if (!gn)
224 return -ENOMEM; 227 return -ENOMEM;
@@ -242,12 +245,14 @@ static int gennvm_register(struct nvm_dev *dev)
242 return 1; 245 return 1;
243err: 246err:
244 gennvm_free(dev); 247 gennvm_free(dev);
248 module_put(THIS_MODULE);
245 return ret; 249 return ret;
246} 250}
247 251
248static void gennvm_unregister(struct nvm_dev *dev) 252static void gennvm_unregister(struct nvm_dev *dev)
249{ 253{
250 gennvm_free(dev); 254 gennvm_free(dev);
255 module_put(THIS_MODULE);
251} 256}
252 257
253static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, 258static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
@@ -262,14 +267,11 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
262 if (list_empty(&lun->free_list)) { 267 if (list_empty(&lun->free_list)) {
263 pr_err_ratelimited("gennvm: lun %u have no free pages available", 268 pr_err_ratelimited("gennvm: lun %u have no free pages available",
264 lun->vlun.id); 269 lun->vlun.id);
265 spin_unlock(&vlun->lock);
266 goto out; 270 goto out;
267 } 271 }
268 272
269 while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) { 273 if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
270 spin_unlock(&vlun->lock);
271 goto out; 274 goto out;
272 }
273 275
274 blk = list_first_entry(&lun->free_list, struct nvm_block, list); 276 blk = list_first_entry(&lun->free_list, struct nvm_block, list);
275 list_move_tail(&blk->list, &lun->used_list); 277 list_move_tail(&blk->list, &lun->used_list);
@@ -278,8 +280,8 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
278 lun->vlun.nr_free_blocks--; 280 lun->vlun.nr_free_blocks--;
279 lun->vlun.nr_inuse_blocks++; 281 lun->vlun.nr_inuse_blocks++;
280 282
281 spin_unlock(&vlun->lock);
282out: 283out:
284 spin_unlock(&vlun->lock);
283 return blk; 285 return blk;
284} 286}
285 287
@@ -349,7 +351,7 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
349 gennvm_generic_to_addr_mode(dev, rqd); 351 gennvm_generic_to_addr_mode(dev, rqd);
350 352
351 rqd->dev = dev; 353 rqd->dev = dev;
352 return dev->ops->submit_io(dev->q, rqd); 354 return dev->ops->submit_io(dev, rqd);
353} 355}
354 356
355static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, 357static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -385,7 +387,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
385 if (!dev->ops->set_bb_tbl) 387 if (!dev->ops->set_bb_tbl)
386 return; 388 return;
387 389
388 if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) 390 if (dev->ops->set_bb_tbl(dev, rqd, 1))
389 return; 391 return;
390 392
391 gennvm_addr_to_generic_mode(dev, rqd); 393 gennvm_addr_to_generic_mode(dev, rqd);
@@ -453,7 +455,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
453 455
454 gennvm_generic_to_addr_mode(dev, &rqd); 456 gennvm_generic_to_addr_mode(dev, &rqd);
455 457
456 ret = dev->ops->erase_block(dev->q, &rqd); 458 ret = dev->ops->erase_block(dev, &rqd);
457 459
458 if (plane_cnt) 460 if (plane_cnt)
459 nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list); 461 nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 75e59c3a3f96..134e4faba482 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -182,7 +182,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
182 struct nvm_block *blk; 182 struct nvm_block *blk;
183 struct rrpc_block *rblk; 183 struct rrpc_block *rblk;
184 184
185 blk = nvm_get_blk(rrpc->dev, rlun->parent, 0); 185 blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
186 if (!blk) 186 if (!blk)
187 return NULL; 187 return NULL;
188 188
@@ -202,6 +202,20 @@ static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
202 nvm_put_blk(rrpc->dev, rblk->parent); 202 nvm_put_blk(rrpc->dev, rblk->parent);
203} 203}
204 204
205static void rrpc_put_blks(struct rrpc *rrpc)
206{
207 struct rrpc_lun *rlun;
208 int i;
209
210 for (i = 0; i < rrpc->nr_luns; i++) {
211 rlun = &rrpc->luns[i];
212 if (rlun->cur)
213 rrpc_put_blk(rrpc, rlun->cur);
214 if (rlun->gc_cur)
215 rrpc_put_blk(rrpc, rlun->gc_cur);
216 }
217}
218
205static struct rrpc_lun *get_next_lun(struct rrpc *rrpc) 219static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
206{ 220{
207 int next = atomic_inc_return(&rrpc->next_lun); 221 int next = atomic_inc_return(&rrpc->next_lun);
@@ -1002,7 +1016,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
1002 return 0; 1016 return 0;
1003 1017
1004 /* Bring up the mapping table from device */ 1018 /* Bring up the mapping table from device */
1005 ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages, 1019 ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
1006 rrpc_l2p_update, rrpc); 1020 rrpc_l2p_update, rrpc);
1007 if (ret) { 1021 if (ret) {
1008 pr_err("nvm: rrpc: could not read L2P table.\n"); 1022 pr_err("nvm: rrpc: could not read L2P table.\n");
@@ -1224,18 +1238,21 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
1224 1238
1225 rblk = rrpc_get_blk(rrpc, rlun, 0); 1239 rblk = rrpc_get_blk(rrpc, rlun, 0);
1226 if (!rblk) 1240 if (!rblk)
1227 return -EINVAL; 1241 goto err;
1228 1242
1229 rrpc_set_lun_cur(rlun, rblk); 1243 rrpc_set_lun_cur(rlun, rblk);
1230 1244
1231 /* Emergency gc block */ 1245 /* Emergency gc block */
1232 rblk = rrpc_get_blk(rrpc, rlun, 1); 1246 rblk = rrpc_get_blk(rrpc, rlun, 1);
1233 if (!rblk) 1247 if (!rblk)
1234 return -EINVAL; 1248 goto err;
1235 rlun->gc_cur = rblk; 1249 rlun->gc_cur = rblk;
1236 } 1250 }
1237 1251
1238 return 0; 1252 return 0;
1253err:
1254 rrpc_put_blks(rrpc);
1255 return -EINVAL;
1239} 1256}
1240 1257
1241static struct nvm_tgt_type tt_rrpc; 1258static struct nvm_tgt_type tt_rrpc;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 1fa45695b68a..c219a053c7f6 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1207,6 +1207,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1207 dm_block_t held_root; 1207 dm_block_t held_root;
1208 1208
1209 /* 1209 /*
1210 * We commit to ensure the btree roots which we increment in a
1211 * moment are up to date.
1212 */
1213 __commit_transaction(pmd);
1214
1215 /*
1210 * Copy the superblock. 1216 * Copy the superblock.
1211 */ 1217 */
1212 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); 1218 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
@@ -1538,7 +1544,7 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
1538static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end) 1544static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
1539{ 1545{
1540 int r; 1546 int r;
1541 unsigned count; 1547 unsigned count, total_count = 0;
1542 struct dm_pool_metadata *pmd = td->pmd; 1548 struct dm_pool_metadata *pmd = td->pmd;
1543 dm_block_t keys[1] = { td->id }; 1549 dm_block_t keys[1] = { td->id };
1544 __le64 value; 1550 __le64 value;
@@ -1561,11 +1567,29 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
1561 if (r) 1567 if (r)
1562 return r; 1568 return r;
1563 1569
1564 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count); 1570 /*
1565 if (r) 1571 * Remove leaves stops at the first unmapped entry, so we have to
1566 return r; 1572 * loop round finding mapped ranges.
1573 */
1574 while (begin < end) {
1575 r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
1576 if (r == -ENODATA)
1577 break;
1578
1579 if (r)
1580 return r;
1581
1582 if (begin >= end)
1583 break;
1584
1585 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
1586 if (r)
1587 return r;
1588
1589 total_count += count;
1590 }
1567 1591
1568 td->mapped_blocks -= count; 1592 td->mapped_blocks -= total_count;
1569 td->changed = 1; 1593 td->changed = 1;
1570 1594
1571 /* 1595 /*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 807095f4c793..dbedc58d8c00 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -314,8 +314,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
314 */ 314 */
315void mddev_suspend(struct mddev *mddev) 315void mddev_suspend(struct mddev *mddev)
316{ 316{
317 BUG_ON(mddev->suspended); 317 if (mddev->suspended++)
318 mddev->suspended = 1; 318 return;
319 synchronize_rcu(); 319 synchronize_rcu();
320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
321 mddev->pers->quiesce(mddev, 1); 321 mddev->pers->quiesce(mddev, 1);
@@ -326,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
326 326
327void mddev_resume(struct mddev *mddev) 327void mddev_resume(struct mddev *mddev)
328{ 328{
329 mddev->suspended = 0; 329 if (--mddev->suspended)
330 return;
330 wake_up(&mddev->sb_wait); 331 wake_up(&mddev->sb_wait);
331 mddev->pers->quiesce(mddev, 0); 332 mddev->pers->quiesce(mddev, 0);
332 333
@@ -1652,7 +1653,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1652 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1653 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1653 if (mddev->recovery_cp == MaxSector) 1654 if (mddev->recovery_cp == MaxSector)
1654 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 1655 set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
1655 rdev->raid_disk = mddev->raid_disks; 1656 rdev->raid_disk = 0;
1656 break; 1657 break;
1657 default: 1658 default:
1658 rdev->saved_raid_disk = role; 1659 rdev->saved_raid_disk = role;
@@ -2773,6 +2774,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2773 /* Activating a spare .. or possibly reactivating 2774 /* Activating a spare .. or possibly reactivating
2774 * if we ever get bitmaps working here. 2775 * if we ever get bitmaps working here.
2775 */ 2776 */
2777 int err;
2776 2778
2777 if (rdev->raid_disk != -1) 2779 if (rdev->raid_disk != -1)
2778 return -EBUSY; 2780 return -EBUSY;
@@ -2794,9 +2796,15 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2794 rdev->saved_raid_disk = -1; 2796 rdev->saved_raid_disk = -1;
2795 clear_bit(In_sync, &rdev->flags); 2797 clear_bit(In_sync, &rdev->flags);
2796 clear_bit(Bitmap_sync, &rdev->flags); 2798 clear_bit(Bitmap_sync, &rdev->flags);
2797 remove_and_add_spares(rdev->mddev, rdev); 2799 err = rdev->mddev->pers->
2798 if (rdev->raid_disk == -1) 2800 hot_add_disk(rdev->mddev, rdev);
2799 return -EBUSY; 2801 if (err) {
2802 rdev->raid_disk = -1;
2803 return err;
2804 } else
2805 sysfs_notify_dirent_safe(rdev->sysfs_state);
2806 if (sysfs_link_rdev(rdev->mddev, rdev))
2807 /* failure here is OK */;
2800 /* don't wakeup anyone, leave that to userspace. */ 2808 /* don't wakeup anyone, leave that to userspace. */
2801 } else { 2809 } else {
2802 if (slot >= rdev->mddev->raid_disks && 2810 if (slot >= rdev->mddev->raid_disks &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2bea51edfab7..ca0b643fe3c1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -566,7 +566,9 @@ static inline char * mdname (struct mddev * mddev)
566static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) 566static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
567{ 567{
568 char nm[20]; 568 char nm[20];
569 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 569 if (!test_bit(Replacement, &rdev->flags) &&
570 !test_bit(Journal, &rdev->flags) &&
571 mddev->kobj.sd) {
570 sprintf(nm, "rd%d", rdev->raid_disk); 572 sprintf(nm, "rd%d", rdev->raid_disk);
571 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 573 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
572 } else 574 } else
@@ -576,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
576static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) 578static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
577{ 579{
578 char nm[20]; 580 char nm[20];
579 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 581 if (!test_bit(Replacement, &rdev->flags) &&
582 !test_bit(Journal, &rdev->flags) &&
583 mddev->kobj.sd) {
580 sprintf(nm, "rd%d", rdev->raid_disk); 584 sprintf(nm, "rd%d", rdev->raid_disk);
581 sysfs_remove_link(&mddev->kobj, nm); 585 sysfs_remove_link(&mddev->kobj, nm);
582 } 586 }
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index c573402033b2..b1ced58eb5e1 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -63,6 +63,11 @@ int lower_bound(struct btree_node *n, uint64_t key)
63 return bsearch(n, key, 0); 63 return bsearch(n, key, 0);
64} 64}
65 65
66static int upper_bound(struct btree_node *n, uint64_t key)
67{
68 return bsearch(n, key, 1);
69}
70
66void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, 71void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
67 struct dm_btree_value_type *vt) 72 struct dm_btree_value_type *vt)
68{ 73{
@@ -252,6 +257,16 @@ static void pop_frame(struct del_stack *s)
252 dm_tm_unlock(s->tm, f->b); 257 dm_tm_unlock(s->tm, f->b);
253} 258}
254 259
260static void unlock_all_frames(struct del_stack *s)
261{
262 struct frame *f;
263
264 while (unprocessed_frames(s)) {
265 f = s->spine + s->top--;
266 dm_tm_unlock(s->tm, f->b);
267 }
268}
269
255int dm_btree_del(struct dm_btree_info *info, dm_block_t root) 270int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
256{ 271{
257 int r; 272 int r;
@@ -308,9 +323,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
308 pop_frame(s); 323 pop_frame(s);
309 } 324 }
310 } 325 }
311
312out: 326out:
327 if (r) {
328 /* cleanup all frames of del_stack */
329 unlock_all_frames(s);
330 }
313 kfree(s); 331 kfree(s);
332
314 return r; 333 return r;
315} 334}
316EXPORT_SYMBOL_GPL(dm_btree_del); 335EXPORT_SYMBOL_GPL(dm_btree_del);
@@ -392,6 +411,82 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
392} 411}
393EXPORT_SYMBOL_GPL(dm_btree_lookup); 412EXPORT_SYMBOL_GPL(dm_btree_lookup);
394 413
414static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
415 uint64_t key, uint64_t *rkey, void *value_le)
416{
417 int r, i;
418 uint32_t flags, nr_entries;
419 struct dm_block *node;
420 struct btree_node *n;
421
422 r = bn_read_lock(info, root, &node);
423 if (r)
424 return r;
425
426 n = dm_block_data(node);
427 flags = le32_to_cpu(n->header.flags);
428 nr_entries = le32_to_cpu(n->header.nr_entries);
429
430 if (flags & INTERNAL_NODE) {
431 i = lower_bound(n, key);
432 if (i < 0 || i >= nr_entries) {
433 r = -ENODATA;
434 goto out;
435 }
436
437 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
438 if (r == -ENODATA && i < (nr_entries - 1)) {
439 i++;
440 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
441 }
442
443 } else {
444 i = upper_bound(n, key);
445 if (i < 0 || i >= nr_entries) {
446 r = -ENODATA;
447 goto out;
448 }
449
450 *rkey = le64_to_cpu(n->keys[i]);
451 memcpy(value_le, value_ptr(n, i), info->value_type.size);
452 }
453out:
454 dm_tm_unlock(info->tm, node);
455 return r;
456}
457
458int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
459 uint64_t *keys, uint64_t *rkey, void *value_le)
460{
461 unsigned level;
462 int r = -ENODATA;
463 __le64 internal_value_le;
464 struct ro_spine spine;
465
466 init_ro_spine(&spine, info);
467 for (level = 0; level < info->levels - 1u; level++) {
468 r = btree_lookup_raw(&spine, root, keys[level],
469 lower_bound, rkey,
470 &internal_value_le, sizeof(uint64_t));
471 if (r)
472 goto out;
473
474 if (*rkey != keys[level]) {
475 r = -ENODATA;
476 goto out;
477 }
478
479 root = le64_to_cpu(internal_value_le);
480 }
481
482 r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
483out:
484 exit_ro_spine(&spine);
485 return r;
486}
487
488EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
489
395/* 490/*
396 * Splits a node by creating a sibling node and shifting half the nodes 491 * Splits a node by creating a sibling node and shifting half the nodes
397 * contents across. Assumes there is a parent node, and it has room for 492 * contents across. Assumes there is a parent node, and it has room for
@@ -473,8 +568,10 @@ static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
473 568
474 r = insert_at(sizeof(__le64), pn, parent_index + 1, 569 r = insert_at(sizeof(__le64), pn, parent_index + 1,
475 le64_to_cpu(rn->keys[0]), &location); 570 le64_to_cpu(rn->keys[0]), &location);
476 if (r) 571 if (r) {
572 unlock_block(s->info, right);
477 return r; 573 return r;
574 }
478 575
479 if (key < le64_to_cpu(rn->keys[0])) { 576 if (key < le64_to_cpu(rn->keys[0])) {
480 unlock_block(s->info, right); 577 unlock_block(s->info, right);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index 11d8cf78621d..c74301fa5a37 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -110,6 +110,13 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
110 uint64_t *keys, void *value_le); 110 uint64_t *keys, void *value_le);
111 111
112/* 112/*
113 * Tries to find the first key where the bottom level key is >= to that
114 * given. Useful for skipping empty sections of the btree.
115 */
116int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
117 uint64_t *keys, uint64_t *rkey, void *value_le);
118
119/*
113 * Insertion (or overwrite an existing value). O(ln(n)) 120 * Insertion (or overwrite an existing value). O(ln(n))
114 */ 121 */
115int dm_btree_insert(struct dm_btree_info *info, dm_block_t root, 122int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
@@ -135,9 +142,10 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
135 uint64_t *keys, dm_block_t *new_root); 142 uint64_t *keys, dm_block_t *new_root);
136 143
137/* 144/*
138 * Removes values between 'keys' and keys2, where keys2 is keys with the 145 * Removes a _contiguous_ run of values starting from 'keys' and not
139 * final key replaced with 'end_key'. 'end_key' is the one-past-the-end 146 * reaching keys2 (where keys2 is keys with the final key replaced with
140 * value. 'keys' may be altered. 147 * 'end_key'). 'end_key' is the one-past-the-end value. 'keys' may be
148 * altered.
141 */ 149 */
142int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root, 150int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
143 uint64_t *keys, uint64_t end_key, 151 uint64_t *keys, uint64_t end_key,
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 53091295fce9..fca6dbcf9a47 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
136 return 0; 136 return 0;
137} 137}
138 138
139static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) 139static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
140{ 140{
141 struct block_op *bop; 141 struct block_op *bop;
142 142
@@ -147,6 +147,17 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
147 result->type = bop->type; 147 result->type = bop->type;
148 result->block = bop->block; 148 result->block = bop->block;
149 149
150 return 0;
151}
152
153static int brb_pop(struct bop_ring_buffer *brb)
154{
155 struct block_op *bop;
156
157 if (brb_empty(brb))
158 return -ENODATA;
159
160 bop = brb->bops + brb->begin;
150 brb->begin = brb_next(brb, brb->begin); 161 brb->begin = brb_next(brb, brb->begin);
151 162
152 return 0; 163 return 0;
@@ -211,7 +222,7 @@ static int apply_bops(struct sm_metadata *smm)
211 while (!brb_empty(&smm->uncommitted)) { 222 while (!brb_empty(&smm->uncommitted)) {
212 struct block_op bop; 223 struct block_op bop;
213 224
214 r = brb_pop(&smm->uncommitted, &bop); 225 r = brb_peek(&smm->uncommitted, &bop);
215 if (r) { 226 if (r) {
216 DMERR("bug in bop ring buffer"); 227 DMERR("bug in bop ring buffer");
217 break; 228 break;
@@ -220,6 +231,8 @@ static int apply_bops(struct sm_metadata *smm)
220 r = commit_bop(smm, &bop); 231 r = commit_bop(smm, &bop);
221 if (r) 232 if (r)
222 break; 233 break;
234
235 brb_pop(&smm->uncommitted);
223 } 236 }
224 237
225 return r; 238 return r;
@@ -683,7 +696,6 @@ static struct dm_space_map bootstrap_ops = {
683static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) 696static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
684{ 697{
685 int r, i; 698 int r, i;
686 enum allocation_event ev;
687 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 699 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
688 dm_block_t old_len = smm->ll.nr_blocks; 700 dm_block_t old_len = smm->ll.nr_blocks;
689 701
@@ -705,11 +717,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
705 * allocate any new blocks. 717 * allocate any new blocks.
706 */ 718 */
707 do { 719 do {
708 for (i = old_len; !r && i < smm->begin; i++) { 720 for (i = old_len; !r && i < smm->begin; i++)
709 r = sm_ll_inc(&smm->ll, i, &ev); 721 r = add_bop(smm, BOP_INC, i);
710 if (r) 722
711 goto out; 723 if (r)
712 } 724 goto out;
725
713 old_len = smm->begin; 726 old_len = smm->begin;
714 727
715 r = apply_bops(smm); 728 r = apply_bops(smm);
@@ -754,7 +767,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
754{ 767{
755 int r; 768 int r;
756 dm_block_t i; 769 dm_block_t i;
757 enum allocation_event ev;
758 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 770 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
759 771
760 smm->begin = superblock + 1; 772 smm->begin = superblock + 1;
@@ -782,7 +794,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
782 * allocated blocks that they were built from. 794 * allocated blocks that they were built from.
783 */ 795 */
784 for (i = superblock; !r && i < smm->begin; i++) 796 for (i = superblock; !r && i < smm->begin; i++)
785 r = sm_ll_inc(&smm->ll, i, &ev); 797 r = add_bop(smm, BOP_INC, i);
786 798
787 if (r) 799 if (r)
788 return r; 800 return r;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 41d70bc9ba2f..84e597e1c489 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1946,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1946 1946
1947 first = i; 1947 first = i;
1948 fbio = r10_bio->devs[i].bio; 1948 fbio = r10_bio->devs[i].bio;
1949 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
1950 fbio->bi_iter.bi_idx = 0;
1949 1951
1950 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 1952 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
1951 /* now find blocks with errors */ 1953 /* now find blocks with errors */
@@ -1989,7 +1991,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1989 bio_reset(tbio); 1991 bio_reset(tbio);
1990 1992
1991 tbio->bi_vcnt = vcnt; 1993 tbio->bi_vcnt = vcnt;
1992 tbio->bi_iter.bi_size = r10_bio->sectors << 9; 1994 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
1993 tbio->bi_rw = WRITE; 1995 tbio->bi_rw = WRITE;
1994 tbio->bi_private = r10_bio; 1996 tbio->bi_private = r10_bio;
1995 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 1997 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 8616fa8193bc..c2e60b4f292d 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -805,11 +805,11 @@ static void ivtv_init_struct2(struct ivtv *itv)
805{ 805{
806 int i; 806 int i;
807 807
808 for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++) 808 for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++)
809 if (itv->card->video_inputs[i].video_type == 0) 809 if (itv->card->video_inputs[i].video_type == 0)
810 break; 810 break;
811 itv->nof_inputs = i; 811 itv->nof_inputs = i;
812 for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++) 812 for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++)
813 if (itv->card->audio_inputs[i].audio_type == 0) 813 if (itv->card->audio_inputs[i].audio_type == 0)
814 break; 814 break;
815 itv->nof_audio_inputs = i; 815 itv->nof_audio_inputs = i;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index fcbb49757614..565a59310747 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -134,7 +134,7 @@ struct airspy {
134 int urbs_submitted; 134 int urbs_submitted;
135 135
136 /* USB control message buffer */ 136 /* USB control message buffer */
137 #define BUF_SIZE 24 137 #define BUF_SIZE 128
138 u8 buf[BUF_SIZE]; 138 u8 buf[BUF_SIZE];
139 139
140 /* Current configuration */ 140 /* Current configuration */
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index e05bfec90f46..0fe5cb2c260c 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -24,6 +24,15 @@
24#include <media/videobuf2-v4l2.h> 24#include <media/videobuf2-v4l2.h>
25#include <media/videobuf2-vmalloc.h> 25#include <media/videobuf2-vmalloc.h>
26 26
27/*
28 * Used Avago MGA-81563 RF amplifier could be destroyed pretty easily with too
29 * strong signal or transmitting to bad antenna.
30 * Set RF gain control to 'grabbed' state by default for sure.
31 */
32static bool hackrf_enable_rf_gain_ctrl;
33module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644);
34MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)");
35
27/* HackRF USB API commands (from HackRF Library) */ 36/* HackRF USB API commands (from HackRF Library) */
28enum { 37enum {
29 CMD_SET_TRANSCEIVER_MODE = 0x01, 38 CMD_SET_TRANSCEIVER_MODE = 0x01,
@@ -1451,6 +1460,7 @@ static int hackrf_probe(struct usb_interface *intf,
1451 dev_err(dev->dev, "Could not initialize controls\n"); 1460 dev_err(dev->dev, "Could not initialize controls\n");
1452 goto err_v4l2_ctrl_handler_free_rx; 1461 goto err_v4l2_ctrl_handler_free_rx;
1453 } 1462 }
1463 v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl);
1454 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); 1464 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler);
1455 1465
1456 /* Register controls for transmitter */ 1466 /* Register controls for transmitter */
@@ -1471,6 +1481,7 @@ static int hackrf_probe(struct usb_interface *intf,
1471 dev_err(dev->dev, "Could not initialize controls\n"); 1481 dev_err(dev->dev, "Could not initialize controls\n");
1472 goto err_v4l2_ctrl_handler_free_tx; 1482 goto err_v4l2_ctrl_handler_free_tx;
1473 } 1483 }
1484 v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl);
1474 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); 1485 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler);
1475 1486
1476 /* Register the v4l2_device structure */ 1487 /* Register the v4l2_device structure */
@@ -1530,7 +1541,7 @@ err_v4l2_ctrl_handler_free_rx:
1530err_kfree: 1541err_kfree:
1531 kfree(dev); 1542 kfree(dev);
1532err: 1543err:
1533 dev_dbg(dev->dev, "failed=%d\n", ret); 1544 dev_dbg(&intf->dev, "failed=%d\n", ret);
1534 return ret; 1545 return ret;
1535} 1546}
1536 1547
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d2e75c88f4d2..f40909793490 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -497,6 +497,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
497{ 497{
498 u64 sr = 0; 498 u64 sr = 0;
499 499
500 set_endian(sr);
500 if (ctx->master) 501 if (ctx->master)
501 sr |= CXL_PSL_SR_An_MP; 502 sr |= CXL_PSL_SR_An_MP;
502 if (mfspr(SPRN_LPCR) & LPCR_TC) 503 if (mfspr(SPRN_LPCR) & LPCR_TC)
@@ -506,7 +507,6 @@ static u64 calculate_sr(struct cxl_context *ctx)
506 sr |= CXL_PSL_SR_An_HV; 507 sr |= CXL_PSL_SR_An_HV;
507 } else { 508 } else {
508 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; 509 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
509 set_endian(sr);
510 sr &= ~(CXL_PSL_SR_An_HV); 510 sr &= ~(CXL_PSL_SR_An_HV);
511 if (!test_tsk_thread_flag(current, TIF_32BIT)) 511 if (!test_tsk_thread_flag(current, TIF_32BIT))
512 sr |= CXL_PSL_SR_An_SF; 512 sr |= CXL_PSL_SR_An_SF;
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 669c3452f278..9ed6038e47d2 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -46,10 +46,18 @@ static int parse_ofpart_partitions(struct mtd_info *master,
46 46
47 ofpart_node = of_get_child_by_name(mtd_node, "partitions"); 47 ofpart_node = of_get_child_by_name(mtd_node, "partitions");
48 if (!ofpart_node) { 48 if (!ofpart_node) {
49 pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", 49 /*
50 master->name, mtd_node->full_name); 50 * We might get here even when ofpart isn't used at all (e.g.,
51 * when using another parser), so don't be louder than
52 * KERN_DEBUG
53 */
54 pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
55 master->name, mtd_node->full_name);
51 ofpart_node = mtd_node; 56 ofpart_node = mtd_node;
52 dedicated = false; 57 dedicated = false;
58 } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) {
59 /* The 'partitions' subnode might be used by another parser */
60 return 0;
53 } 61 }
54 62
55 /* First count the subnodes */ 63 /* First count the subnodes */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 970781a9e677..f6a7161e3b85 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
1849 usleep_range(10, 15); 1849 usleep_range(10, 15);
1850 1850
1851 /* Poll Until Poll Condition */ 1851 /* Poll Until Poll Condition */
1852 while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 1852 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
1853 usleep_range(500, 600); 1853 usleep_range(500, 600);
1854 1854
1855 if (!count) 1855 if (!count)
@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
1873 /* Poll Until Poll Condition */ 1873 /* Poll Until Poll Condition */
1874 for (i = 0; i < pdata->tx_q_count; i++) { 1874 for (i = 0; i < pdata->tx_q_count; i++) {
1875 count = 2000; 1875 count = 2000;
1876 while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, 1876 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
1877 MTL_Q_TQOMR, FTQ)) 1877 MTL_Q_TQOMR, FTQ))
1878 usleep_range(500, 600); 1878 usleep_range(500, 600);
1879 1879
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 9147a0107c44..d0ae1a6cc212 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
289 struct sk_buff *skb) 289 struct sk_buff *skb)
290{ 290{
291 struct device *dev = ndev_to_dev(tx_ring->ndev); 291 struct device *dev = ndev_to_dev(tx_ring->ndev);
292 struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
292 struct xgene_enet_raw_desc *raw_desc; 293 struct xgene_enet_raw_desc *raw_desc;
293 __le64 *exp_desc = NULL, *exp_bufs = NULL; 294 __le64 *exp_desc = NULL, *exp_bufs = NULL;
294 dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; 295 dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
@@ -419,6 +420,7 @@ out:
419 raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | 420 raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
420 SET_VAL(USERINFO, tx_ring->tail)); 421 SET_VAL(USERINFO, tx_ring->tail));
421 tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; 422 tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
423 pdata->tx_level += count;
422 tx_ring->tail = tail; 424 tx_ring->tail = tail;
423 425
424 return count; 426 return count;
@@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
429{ 431{
430 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 432 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
431 struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; 433 struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
432 struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; 434 u32 tx_level = pdata->tx_level;
433 u32 tx_level, cq_level;
434 int count; 435 int count;
435 436
436 tx_level = pdata->ring_ops->len(tx_ring); 437 if (tx_level < pdata->txc_level)
437 cq_level = pdata->ring_ops->len(cp_ring); 438 tx_level += ((typeof(pdata->tx_level))~0U);
438 if (unlikely(tx_level > pdata->tx_qcnt_hi || 439
439 cq_level > pdata->cp_qcnt_hi)) { 440 if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
440 netif_stop_queue(ndev); 441 netif_stop_queue(ndev);
441 return NETDEV_TX_BUSY; 442 return NETDEV_TX_BUSY;
442 } 443 }
@@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
539 struct xgene_enet_raw_desc *raw_desc, *exp_desc; 540 struct xgene_enet_raw_desc *raw_desc, *exp_desc;
540 u16 head = ring->head; 541 u16 head = ring->head;
541 u16 slots = ring->slots - 1; 542 u16 slots = ring->slots - 1;
542 int ret, count = 0, processed = 0; 543 int ret, desc_count, count = 0, processed = 0;
544 bool is_completion;
543 545
544 do { 546 do {
545 raw_desc = &ring->raw_desc[head]; 547 raw_desc = &ring->raw_desc[head];
548 desc_count = 0;
549 is_completion = false;
546 exp_desc = NULL; 550 exp_desc = NULL;
547 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 551 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
548 break; 552 break;
@@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
559 } 563 }
560 dma_rmb(); 564 dma_rmb();
561 count++; 565 count++;
566 desc_count++;
562 } 567 }
563 if (is_rx_desc(raw_desc)) 568 if (is_rx_desc(raw_desc)) {
564 ret = xgene_enet_rx_frame(ring, raw_desc); 569 ret = xgene_enet_rx_frame(ring, raw_desc);
565 else 570 } else {
566 ret = xgene_enet_tx_completion(ring, raw_desc); 571 ret = xgene_enet_tx_completion(ring, raw_desc);
572 is_completion = true;
573 }
567 xgene_enet_mark_desc_slot_empty(raw_desc); 574 xgene_enet_mark_desc_slot_empty(raw_desc);
568 if (exp_desc) 575 if (exp_desc)
569 xgene_enet_mark_desc_slot_empty(exp_desc); 576 xgene_enet_mark_desc_slot_empty(exp_desc);
570 577
571 head = (head + 1) & slots; 578 head = (head + 1) & slots;
572 count++; 579 count++;
580 desc_count++;
573 processed++; 581 processed++;
582 if (is_completion)
583 pdata->txc_level += desc_count;
574 584
575 if (ret) 585 if (ret)
576 break; 586 break;
@@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
580 pdata->ring_ops->wr_cmd(ring, -count); 590 pdata->ring_ops->wr_cmd(ring, -count);
581 ring->head = head; 591 ring->head = head;
582 592
583 if (netif_queue_stopped(ring->ndev)) { 593 if (netif_queue_stopped(ring->ndev))
584 if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low) 594 netif_start_queue(ring->ndev);
585 netif_wake_queue(ring->ndev);
586 }
587 } 595 }
588 596
589 return processed; 597 return processed;
@@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
1033 pdata->tx_ring->cp_ring = cp_ring; 1041 pdata->tx_ring->cp_ring = cp_ring;
1034 pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); 1042 pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1035 1043
1036 pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; 1044 pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
1037 pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
1038 pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
1039 1045
1040 return 0; 1046 return 0;
1041 1047
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index a6e56b88c0a0..1aa72c787f8d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -155,11 +155,11 @@ struct xgene_enet_pdata {
155 enum xgene_enet_id enet_id; 155 enum xgene_enet_id enet_id;
156 struct xgene_enet_desc_ring *tx_ring; 156 struct xgene_enet_desc_ring *tx_ring;
157 struct xgene_enet_desc_ring *rx_ring; 157 struct xgene_enet_desc_ring *rx_ring;
158 u16 tx_level;
159 u16 txc_level;
158 char *dev_name; 160 char *dev_name;
159 u32 rx_buff_cnt; 161 u32 rx_buff_cnt;
160 u32 tx_qcnt_hi; 162 u32 tx_qcnt_hi;
161 u32 cp_qcnt_hi;
162 u32 cp_qcnt_low;
163 u32 rx_irq; 163 u32 rx_irq;
164 u32 txc_irq; 164 u32 txc_irq;
165 u8 cq_cnt; 165 u8 cq_cnt;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2795d6db10e1..8b5988e210d5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1016 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1016 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
1017 8 * 4; 1017 8 * 4;
1018 1018
1019 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 1019 ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
1020 &ring_header->dma); 1020 &ring_header->dma, GFP_KERNEL);
1021 if (unlikely(!ring_header->desc)) { 1021 if (unlikely(!ring_header->desc)) {
1022 dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); 1022 dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1023 goto err_nomem; 1023 goto err_nomem;
1024 } 1024 }
1025 memset(ring_header->desc, 0, ring_header->size);
1026 /* init TPD ring */ 1025 /* init TPD ring */
1027 1026
1028 tpd_ring[0].dma = roundup(ring_header->dma, 8); 1027 tpd_ring[0].dma = roundup(ring_header->dma, 8);
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
index a3c7106fdf85..8ba7f8ff3434 100644
--- a/drivers/net/ethernet/aurora/Kconfig
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -13,6 +13,7 @@ if NET_VENDOR_AURORA
13 13
14config AURORA_NB8800 14config AURORA_NB8800
15 tristate "Aurora AU-NB8800 support" 15 tristate "Aurora AU-NB8800 support"
16 depends on HAS_DMA
16 select PHYLIB 17 select PHYLIB
17 help 18 help
18 Support for the AU-NB8800 gigabit Ethernet controller. 19 Support for the AU-NB8800 gigabit Ethernet controller.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index bdf094fb6ef9..07f5f239cb65 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2693 req.ver_upd = DRV_VER_UPD; 2693 req.ver_upd = DRV_VER_UPD;
2694 2694
2695 if (BNXT_PF(bp)) { 2695 if (BNXT_PF(bp)) {
2696 unsigned long vf_req_snif_bmap[4]; 2696 DECLARE_BITMAP(vf_req_snif_bmap, 256);
2697 u32 *data = (u32 *)vf_req_snif_bmap; 2697 u32 *data = (u32 *)vf_req_snif_bmap;
2698 2698
2699 memset(vf_req_snif_bmap, 0, 32); 2699 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
2700 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) 2700 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2701 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); 2701 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2702 2702
2703 for (i = 0; i < 8; i++) { 2703 for (i = 0; i < 8; i++)
2704 req.vf_req_fwd[i] = cpu_to_le32(*data); 2704 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2705 data++; 2705
2706 }
2707 req.enables |= 2706 req.enables |=
2708 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 2707 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2709 } 2708 }
@@ -4603,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4603 bp->nge_port_cnt = 1; 4602 bp->nge_port_cnt = 1;
4604 } 4603 }
4605 4604
4606 bp->state = BNXT_STATE_OPEN; 4605 set_bit(BNXT_STATE_OPEN, &bp->state);
4607 bnxt_enable_int(bp); 4606 bnxt_enable_int(bp);
4608 /* Enable TX queues */ 4607 /* Enable TX queues */
4609 bnxt_tx_enable(bp); 4608 bnxt_tx_enable(bp);
@@ -4679,8 +4678,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4679 /* Change device state to avoid TX queue wake up's */ 4678 /* Change device state to avoid TX queue wake up's */
4680 bnxt_tx_disable(bp); 4679 bnxt_tx_disable(bp);
4681 4680
4682 bp->state = BNXT_STATE_CLOSED; 4681 clear_bit(BNXT_STATE_OPEN, &bp->state);
4683 cancel_work_sync(&bp->sp_task); 4682 smp_mb__after_atomic();
4683 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
4684 msleep(20);
4684 4685
4685 /* Flush rings before disabling interrupts */ 4686 /* Flush rings before disabling interrupts */
4686 bnxt_shutdown_nic(bp, irq_re_init); 4687 bnxt_shutdown_nic(bp, irq_re_init);
@@ -5030,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
5030static void bnxt_reset_task(struct bnxt *bp) 5031static void bnxt_reset_task(struct bnxt *bp)
5031{ 5032{
5032 bnxt_dbg_dump_states(bp); 5033 bnxt_dbg_dump_states(bp);
5033 if (netif_running(bp->dev)) 5034 if (netif_running(bp->dev)) {
5034 bnxt_tx_disable(bp); /* prevent tx timout again */ 5035 bnxt_close_nic(bp, false, false);
5036 bnxt_open_nic(bp, false, false);
5037 }
5035} 5038}
5036 5039
5037static void bnxt_tx_timeout(struct net_device *dev) 5040static void bnxt_tx_timeout(struct net_device *dev)
@@ -5081,8 +5084,12 @@ static void bnxt_sp_task(struct work_struct *work)
5081 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 5084 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5082 int rc; 5085 int rc;
5083 5086
5084 if (bp->state != BNXT_STATE_OPEN) 5087 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5088 smp_mb__after_atomic();
5089 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5090 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5085 return; 5091 return;
5092 }
5086 5093
5087 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 5094 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5088 bnxt_cfg_rx_mode(bp); 5095 bnxt_cfg_rx_mode(bp);
@@ -5106,8 +5113,19 @@ static void bnxt_sp_task(struct work_struct *work)
5106 bnxt_hwrm_tunnel_dst_port_free( 5113 bnxt_hwrm_tunnel_dst_port_free(
5107 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 5114 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5108 } 5115 }
5109 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 5116 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5117 /* bnxt_reset_task() calls bnxt_close_nic() which waits
5118 * for BNXT_STATE_IN_SP_TASK to clear.
5119 */
5120 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5121 rtnl_lock();
5110 bnxt_reset_task(bp); 5122 bnxt_reset_task(bp);
5123 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5124 rtnl_unlock();
5125 }
5126
5127 smp_mb__before_atomic();
5128 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5111} 5129}
5112 5130
5113static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 5131static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -5186,7 +5204,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5186 bp->timer.function = bnxt_timer; 5204 bp->timer.function = bnxt_timer;
5187 bp->current_interval = BNXT_TIMER_INTERVAL; 5205 bp->current_interval = BNXT_TIMER_INTERVAL;
5188 5206
5189 bp->state = BNXT_STATE_CLOSED; 5207 clear_bit(BNXT_STATE_OPEN, &bp->state);
5190 5208
5191 return 0; 5209 return 0;
5192 5210
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 674bc5159b91..f199f4cc8ffe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -925,9 +925,9 @@ struct bnxt {
925 925
926 struct timer_list timer; 926 struct timer_list timer;
927 927
928 int state; 928 unsigned long state;
929#define BNXT_STATE_CLOSED 0 929#define BNXT_STATE_OPEN 0
930#define BNXT_STATE_OPEN 1 930#define BNXT_STATE_IN_SP_TASK 1
931 931
932 struct bnxt_irq *irq_tbl; 932 struct bnxt_irq *irq_tbl;
933 u8 mac_addr[ETH_ALEN]; 933 u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 7a9af2887d8e..ea044bbcd384 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -21,7 +21,7 @@
21#ifdef CONFIG_BNXT_SRIOV 21#ifdef CONFIG_BNXT_SRIOV
22static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) 22static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
23{ 23{
24 if (bp->state != BNXT_STATE_OPEN) { 24 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
25 netdev_err(bp->dev, "vf ndo called though PF is down\n"); 25 netdev_err(bp->dev, "vf ndo called though PF is down\n");
26 return -EINVAL; 26 return -EINVAL;
27 } 27 }
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 4b7fd63ae57c..5f24d11cb16a 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -37,7 +37,6 @@ struct nicpf {
37#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) 37#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
38#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) 38#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
39 u8 vf_lmac_map[MAX_LMAC]; 39 u8 vf_lmac_map[MAX_LMAC];
40 u8 lmac_cnt;
41 struct delayed_work dwork; 40 struct delayed_work dwork;
42 struct workqueue_struct *check_link; 41 struct workqueue_struct *check_link;
43 u8 link[MAX_LMAC]; 42 u8 link[MAX_LMAC];
@@ -280,7 +279,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
280 u64 lmac_credit; 279 u64 lmac_credit;
281 280
282 nic->num_vf_en = 0; 281 nic->num_vf_en = 0;
283 nic->lmac_cnt = 0;
284 282
285 for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { 283 for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
286 if (!(bgx_map & (1 << bgx))) 284 if (!(bgx_map & (1 << bgx)))
@@ -290,7 +288,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
290 nic->vf_lmac_map[next_bgx_lmac++] = 288 nic->vf_lmac_map[next_bgx_lmac++] =
291 NIC_SET_VF_LMAC_MAP(bgx, lmac); 289 NIC_SET_VF_LMAC_MAP(bgx, lmac);
292 nic->num_vf_en += lmac_cnt; 290 nic->num_vf_en += lmac_cnt;
293 nic->lmac_cnt += lmac_cnt;
294 291
295 /* Program LMAC credits */ 292 /* Program LMAC credits */
296 lmac_credit = (1ull << 1); /* channel credit enable */ 293 lmac_credit = (1ull << 1); /* channel credit enable */
@@ -618,6 +615,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
618 return 0; 615 return 0;
619} 616}
620 617
618static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
619{
620 int bgx, lmac;
621
622 nic->vf_enabled[vf] = enable;
623
624 if (vf >= nic->num_vf_en)
625 return;
626
627 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
628 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
629
630 bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
631}
632
621/* Interrupt handler to handle mailbox messages from VFs */ 633/* Interrupt handler to handle mailbox messages from VFs */
622static void nic_handle_mbx_intr(struct nicpf *nic, int vf) 634static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
623{ 635{
@@ -717,29 +729,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
717 break; 729 break;
718 case NIC_MBOX_MSG_CFG_DONE: 730 case NIC_MBOX_MSG_CFG_DONE:
719 /* Last message of VF config msg sequence */ 731 /* Last message of VF config msg sequence */
720 nic->vf_enabled[vf] = true; 732 nic_enable_vf(nic, vf, true);
721 if (vf >= nic->lmac_cnt)
722 goto unlock;
723
724 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
725 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
726
727 bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
728 goto unlock; 733 goto unlock;
729 case NIC_MBOX_MSG_SHUTDOWN: 734 case NIC_MBOX_MSG_SHUTDOWN:
730 /* First msg in VF teardown sequence */ 735 /* First msg in VF teardown sequence */
731 nic->vf_enabled[vf] = false;
732 if (vf >= nic->num_vf_en) 736 if (vf >= nic->num_vf_en)
733 nic->sqs_used[vf - nic->num_vf_en] = false; 737 nic->sqs_used[vf - nic->num_vf_en] = false;
734 nic->pqs_vf[vf] = 0; 738 nic->pqs_vf[vf] = 0;
735 739 nic_enable_vf(nic, vf, false);
736 if (vf >= nic->lmac_cnt)
737 break;
738
739 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
740 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
741
742 bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
743 break; 740 break;
744 case NIC_MBOX_MSG_ALLOC_SQS: 741 case NIC_MBOX_MSG_ALLOC_SQS:
745 nic_alloc_sqs(nic, &mbx.sqs_alloc); 742 nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -958,7 +955,7 @@ static void nic_poll_for_link(struct work_struct *work)
958 955
959 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; 956 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
960 957
961 for (vf = 0; vf < nic->lmac_cnt; vf++) { 958 for (vf = 0; vf < nic->num_vf_en; vf++) {
962 /* Poll only if VF is UP */ 959 /* Poll only if VF is UP */
963 if (!nic->vf_enabled[vf]) 960 if (!nic->vf_enabled[vf])
964 continue; 961 continue;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 63c2bcf8031a..b1026689b78f 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
48 *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 48 *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
49 else { /* !dst_is_aligned */ 49 else { /* !dst_is_aligned */
50 for (i = 0; i < len; i++, reg++) { 50 for (i = 0; i < len; i++, reg++) {
51 u32 buf = 51 u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
52 nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 52 put_unaligned(buf, reg);
53
54 /* to accommodate word-unaligned address of "reg"
55 * we have to do memcpy_toio() instead of simple "=".
56 */
57 memcpy_toio((void __iomem *)reg, &buf, sizeof(buf));
58 } 53 }
59 } 54 }
60 55
61 /* copy last bytes (if any) */ 56 /* copy last bytes (if any) */
62 if (last) { 57 if (last) {
63 u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 58 u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
64 59 memcpy((u8*)reg, &buf, last);
65 memcpy_toio((void __iomem *)reg, &buf, last);
66 } 60 }
67} 61}
68 62
@@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev,
367 struct nps_enet_tx_ctl tx_ctrl; 361 struct nps_enet_tx_ctl tx_ctrl;
368 short length = skb->len; 362 short length = skb->len;
369 u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); 363 u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
370 u32 *src = (u32 *)virt_to_phys(skb->data); 364 u32 *src = (void *)skb->data;
371 bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); 365 bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
372 366
373 tx_ctrl.value = 0; 367 tx_ctrl.value = 0;
@@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev,
375 if (src_is_aligned) 369 if (src_is_aligned)
376 for (i = 0; i < len; i++, src++) 370 for (i = 0; i < len; i++, src++)
377 nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); 371 nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
378 else { /* !src_is_aligned */ 372 else /* !src_is_aligned */
379 for (i = 0; i < len; i++, src++) { 373 for (i = 0; i < len; i++, src++)
380 u32 buf; 374 nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
381 375 get_unaligned(src));
382 /* to accommodate word-unaligned address of "src" 376
383 * we have to do memcpy_fromio() instead of simple "="
384 */
385 memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf));
386 nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf);
387 }
388 }
389 /* Write the length of the Frame */ 377 /* Write the length of the Frame */
390 tx_ctrl.nt = length; 378 tx_ctrl.nt = length;
391 379
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 08f5b911d96b..52e0091b4fb2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
552 cbd_t __iomem *prev_bd; 552 cbd_t __iomem *prev_bd;
553 cbd_t __iomem *last_tx_bd; 553 cbd_t __iomem *last_tx_bd;
554 554
555 last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); 555 last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
556 556
557 /* get the current bd held in TBPTR and scan back from this point */ 557 /* get the current bd held in TBPTR and scan back from this point */
558 recheck_bd = curr_tbptr = (cbd_t __iomem *) 558 recheck_bd = curr_tbptr = (cbd_t __iomem *)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 55c36230e176..40071dad1c57 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
464 * address). Print error message but continue anyway. 464 * address). Print error message but continue anyway.
465 */ 465 */
466 if ((void *)tbipa > priv->map + resource_size(&res) - 4) 466 if ((void *)tbipa > priv->map + resource_size(&res) - 4)
467 dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", 467 dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
468 ((void *)tbipa - priv->map) + 4); 468 ((void *)tbipa - priv->map) + 4);
469 469
470 iowrite32be(be32_to_cpup(prop), tbipa); 470 iowrite32be(be32_to_cpup(prop), tbipa);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7cf898455e60..3e233d924cce 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -894,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
894 FSL_GIANFAR_DEV_HAS_VLAN | 894 FSL_GIANFAR_DEV_HAS_VLAN |
895 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 895 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
896 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 896 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
897 FSL_GIANFAR_DEV_HAS_TIMER; 897 FSL_GIANFAR_DEV_HAS_TIMER |
898 FSL_GIANFAR_DEV_HAS_RX_FILER;
898 899
899 err = of_property_read_string(np, "phy-connection-type", &ctype); 900 err = of_property_read_string(np, "phy-connection-type", &ctype);
900 901
@@ -1396,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev)
1396 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1397 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1397 } 1398 }
1398 1399
1399 /* always enable rx filer */ 1400 /* Always enable rx filer if available */
1400 priv->rx_filer_enable = 1; 1401 priv->rx_filer_enable =
1402 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
1401 /* Enable most messages by default */ 1403 /* Enable most messages by default */
1402 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1404 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1403 /* use pritority h/w tx queue scheduling for single queue devices */ 1405 /* use pritority h/w tx queue scheduling for single queue devices */
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f266b20f9ef5..cb77667971a7 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -923,6 +923,7 @@ struct gfar {
923#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 923#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
924#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 924#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
925#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 925#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
926#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000
926 927
927#if (MAXGROUPS == 2) 928#if (MAXGROUPS == 2)
928#define DEFAULT_MAPPING 0xAA 929#define DEFAULT_MAPPING 0xAA
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 2a98eba660c0..b674414a4d72 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1259,12 +1259,8 @@ int hns_dsaf_set_mac_uc_entry(
1259 if (MAC_IS_ALL_ZEROS(mac_entry->addr) || 1259 if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
1260 MAC_IS_BROADCAST(mac_entry->addr) || 1260 MAC_IS_BROADCAST(mac_entry->addr) ||
1261 MAC_IS_MULTICAST(mac_entry->addr)) { 1261 MAC_IS_MULTICAST(mac_entry->addr)) {
1262 dev_err(dsaf_dev->dev, 1262 dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n",
1263 "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", 1263 dsaf_dev->ae_dev.name, mac_entry->addr);
1264 dsaf_dev->ae_dev.name, mac_entry->addr[0],
1265 mac_entry->addr[1], mac_entry->addr[2],
1266 mac_entry->addr[3], mac_entry->addr[4],
1267 mac_entry->addr[5]);
1268 return -EINVAL; 1264 return -EINVAL;
1269 } 1265 }
1270 1266
@@ -1331,12 +1327,8 @@ int hns_dsaf_set_mac_mc_entry(
1331 1327
1332 /* mac addr check */ 1328 /* mac addr check */
1333 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { 1329 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
1334 dev_err(dsaf_dev->dev, 1330 dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
1335 "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", 1331 dsaf_dev->ae_dev.name, mac_entry->addr);
1336 dsaf_dev->ae_dev.name, mac_entry->addr[0],
1337 mac_entry->addr[1], mac_entry->addr[2],
1338 mac_entry->addr[3],
1339 mac_entry->addr[4], mac_entry->addr[5]);
1340 return -EINVAL; 1332 return -EINVAL;
1341 } 1333 }
1342 1334
@@ -1410,11 +1402,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
1410 1402
1411 /*chechk mac addr */ 1403 /*chechk mac addr */
1412 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { 1404 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
1413 dev_err(dsaf_dev->dev, 1405 dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n",
1414 "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", 1406 mac_entry->addr);
1415 mac_entry->addr[0], mac_entry->addr[1],
1416 mac_entry->addr[2], mac_entry->addr[3],
1417 mac_entry->addr[4], mac_entry->addr[5]);
1418 return -EINVAL; 1407 return -EINVAL;
1419 } 1408 }
1420 1409
@@ -1497,9 +1486,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
1497 1486
1498 /*check mac addr */ 1487 /*check mac addr */
1499 if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { 1488 if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
1500 dev_err(dsaf_dev->dev, 1489 dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n",
1501 "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", 1490 addr);
1502 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1503 return -EINVAL; 1491 return -EINVAL;
1504 } 1492 }
1505 1493
@@ -1563,11 +1551,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
1563 1551
1564 /*check mac addr */ 1552 /*check mac addr */
1565 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { 1553 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
1566 dev_err(dsaf_dev->dev, 1554 dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
1567 "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n", 1555 mac_entry->addr);
1568 mac_entry->addr[0], mac_entry->addr[1],
1569 mac_entry->addr[2], mac_entry->addr[3],
1570 mac_entry->addr[4], mac_entry->addr[5]);
1571 return -EINVAL; 1556 return -EINVAL;
1572 } 1557 }
1573 1558
@@ -1644,11 +1629,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
1644 /* check macaddr */ 1629 /* check macaddr */
1645 if (MAC_IS_ALL_ZEROS(mac_entry->addr) || 1630 if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
1646 MAC_IS_BROADCAST(mac_entry->addr)) { 1631 MAC_IS_BROADCAST(mac_entry->addr)) {
1647 dev_err(dsaf_dev->dev, 1632 dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
1648 "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", 1633 mac_entry->addr);
1649 mac_entry->addr[0], mac_entry->addr[1],
1650 mac_entry->addr[2], mac_entry->addr[3],
1651 mac_entry->addr[4], mac_entry->addr[5]);
1652 return -EINVAL; 1634 return -EINVAL;
1653 } 1635 }
1654 1636
@@ -1695,11 +1677,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
1695 /*check mac addr */ 1677 /*check mac addr */
1696 if (MAC_IS_ALL_ZEROS(mac_entry->addr) || 1678 if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
1697 MAC_IS_BROADCAST(mac_entry->addr)) { 1679 MAC_IS_BROADCAST(mac_entry->addr)) {
1698 dev_err(dsaf_dev->dev, 1680 dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
1699 "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", 1681 mac_entry->addr);
1700 mac_entry->addr[0], mac_entry->addr[1],
1701 mac_entry->addr[2], mac_entry->addr[3],
1702 mac_entry->addr[4], mac_entry->addr[5]);
1703 return -EINVAL; 1682 return -EINVAL;
1704 } 1683 }
1705 1684
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b475e1bf2e6f..bdbd80423b17 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -898,7 +898,7 @@
898#define XGMAC_PAUSE_CTL_RSP_MODE_B 2 898#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
899#define XGMAC_PAUSE_CTL_TX_XOFF_B 3 899#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
900 900
901static inline void dsaf_write_reg(void *base, u32 reg, u32 value) 901static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
902{ 902{
903 u8 __iomem *reg_addr = ACCESS_ONCE(base); 903 u8 __iomem *reg_addr = ACCESS_ONCE(base);
904 904
@@ -908,7 +908,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
908#define dsaf_write_dev(a, reg, value) \ 908#define dsaf_write_dev(a, reg, value) \
909 dsaf_write_reg((a)->io_base, (reg), (value)) 909 dsaf_write_reg((a)->io_base, (reg), (value))
910 910
911static inline u32 dsaf_read_reg(u8 *base, u32 reg) 911static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
912{ 912{
913 u8 __iomem *reg_addr = ACCESS_ONCE(base); 913 u8 __iomem *reg_addr = ACCESS_ONCE(base);
914 914
@@ -927,8 +927,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg)
927#define dsaf_set_bit(origin, shift, val) \ 927#define dsaf_set_bit(origin, shift, val) \
928 dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) 928 dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
929 929
930static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, 930static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
931 u32 val) 931 u32 shift, u32 val)
932{ 932{
933 u32 origin = dsaf_read_reg(base, reg); 933 u32 origin = dsaf_read_reg(base, reg);
934 934
@@ -947,7 +947,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
947#define dsaf_get_bit(origin, shift) \ 947#define dsaf_get_bit(origin, shift) \
948 dsaf_get_field((origin), (1ull << (shift)), (shift)) 948 dsaf_get_field((origin), (1ull << (shift)), (shift))
949 949
950static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) 950static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
951 u32 shift)
951{ 952{
952 u32 origin; 953 u32 origin;
953 954
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0ff8f01e57ee..1fd5ea82a9bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
567 goto init_adminq_exit; 567 goto init_adminq_exit;
568 } 568 }
569 569
570 /* initialize locks */
571 mutex_init(&hw->aq.asq_mutex);
572 mutex_init(&hw->aq.arq_mutex);
573
574 /* Set up register offsets */ 570 /* Set up register offsets */
575 i40e_adminq_init_regs(hw); 571 i40e_adminq_init_regs(hw);
576 572
@@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
664 i40e_shutdown_asq(hw); 660 i40e_shutdown_asq(hw);
665 i40e_shutdown_arq(hw); 661 i40e_shutdown_arq(hw);
666 662
667 /* destroy the locks */
668
669 if (hw->nvm_buff.va) 663 if (hw->nvm_buff.va)
670 i40e_free_virt_mem(hw, &hw->nvm_buff); 664 i40e_free_virt_mem(hw, &hw->nvm_buff);
671 665
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b825f978d441..4a9873ec28c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10295,6 +10295,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10295 /* set up a default setting for link flow control */ 10295 /* set up a default setting for link flow control */
10296 pf->hw.fc.requested_mode = I40E_FC_NONE; 10296 pf->hw.fc.requested_mode = I40E_FC_NONE;
10297 10297
10298 /* set up the locks for the AQ, do this only once in probe
10299 * and destroy them only once in remove
10300 */
10301 mutex_init(&hw->aq.asq_mutex);
10302 mutex_init(&hw->aq.arq_mutex);
10303
10298 err = i40e_init_adminq(hw); 10304 err = i40e_init_adminq(hw);
10299 10305
10300 /* provide nvm, fw, api versions */ 10306 /* provide nvm, fw, api versions */
@@ -10697,7 +10703,6 @@ static void i40e_remove(struct pci_dev *pdev)
10697 set_bit(__I40E_DOWN, &pf->state); 10703 set_bit(__I40E_DOWN, &pf->state);
10698 del_timer_sync(&pf->service_timer); 10704 del_timer_sync(&pf->service_timer);
10699 cancel_work_sync(&pf->service_task); 10705 cancel_work_sync(&pf->service_task);
10700 i40e_fdir_teardown(pf);
10701 10706
10702 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 10707 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10703 i40e_free_vfs(pf); 10708 i40e_free_vfs(pf);
@@ -10740,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev)
10740 "Failed to destroy the Admin Queue resources: %d\n", 10745 "Failed to destroy the Admin Queue resources: %d\n",
10741 ret_code); 10746 ret_code);
10742 10747
10748 /* destroy the locks only once, here */
10749 mutex_destroy(&hw->aq.arq_mutex);
10750 mutex_destroy(&hw->aq.asq_mutex);
10751
10743 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 10752 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10744 i40e_clear_interrupt_scheme(pf); 10753 i40e_clear_interrupt_scheme(pf);
10745 for (i = 0; i < pf->num_alloc_vsi; i++) { 10754 for (i = 0; i < pf->num_alloc_vsi; i++) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index fd123ca60761..3f65e39b3fe4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
551 goto init_adminq_exit; 551 goto init_adminq_exit;
552 } 552 }
553 553
554 /* initialize locks */
555 mutex_init(&hw->aq.asq_mutex);
556 mutex_init(&hw->aq.arq_mutex);
557
558 /* Set up register offsets */ 554 /* Set up register offsets */
559 i40e_adminq_init_regs(hw); 555 i40e_adminq_init_regs(hw);
560 556
@@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
596 i40e_shutdown_asq(hw); 592 i40e_shutdown_asq(hw);
597 i40e_shutdown_arq(hw); 593 i40e_shutdown_arq(hw);
598 594
599 /* destroy the locks */
600
601 if (hw->nvm_buff.va) 595 if (hw->nvm_buff.va)
602 i40e_free_virt_mem(hw, &hw->nvm_buff); 596 i40e_free_virt_mem(hw, &hw->nvm_buff);
603 597
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index d962164dfb0f..99d2cffae0cd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2476,6 +2476,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2476 hw->bus.device = PCI_SLOT(pdev->devfn); 2476 hw->bus.device = PCI_SLOT(pdev->devfn);
2477 hw->bus.func = PCI_FUNC(pdev->devfn); 2477 hw->bus.func = PCI_FUNC(pdev->devfn);
2478 2478
2479 /* set up the locks for the AQ, do this only once in probe
2480 * and destroy them only once in remove
2481 */
2482 mutex_init(&hw->aq.asq_mutex);
2483 mutex_init(&hw->aq.arq_mutex);
2484
2479 INIT_LIST_HEAD(&adapter->mac_filter_list); 2485 INIT_LIST_HEAD(&adapter->mac_filter_list);
2480 INIT_LIST_HEAD(&adapter->vlan_filter_list); 2486 INIT_LIST_HEAD(&adapter->vlan_filter_list);
2481 2487
@@ -2629,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev)
2629 if (hw->aq.asq.count) 2635 if (hw->aq.asq.count)
2630 i40evf_shutdown_adminq(hw); 2636 i40evf_shutdown_adminq(hw);
2631 2637
2638 /* destroy the locks only once, here */
2639 mutex_destroy(&hw->aq.arq_mutex);
2640 mutex_destroy(&hw->aq.asq_mutex);
2641
2632 iounmap(hw->hw_addr); 2642 iounmap(hw->hw_addr);
2633 pci_release_regions(pdev); 2643 pci_release_regions(pdev);
2634 2644
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 47395ff5d908..aed8d029b23d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7920,6 +7920,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7920 */ 7920 */
7921 if (netif_running(dev)) 7921 if (netif_running(dev))
7922 ixgbe_close(dev); 7922 ixgbe_close(dev);
7923 else
7924 ixgbe_reset(adapter);
7925
7923 ixgbe_clear_interrupt_scheme(adapter); 7926 ixgbe_clear_interrupt_scheme(adapter);
7924 7927
7925#ifdef CONFIG_IXGBE_DCB 7928#ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d9884fd15b45..a4beccf1fd46 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3413} 3413}
3414 3414
3415/* Free all buffers from the pool */ 3415/* Free all buffers from the pool */
3416static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) 3416static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3417 struct mvpp2_bm_pool *bm_pool)
3417{ 3418{
3418 int i; 3419 int i;
3419 3420
3420 for (i = 0; i < bm_pool->buf_num; i++) { 3421 for (i = 0; i < bm_pool->buf_num; i++) {
3422 dma_addr_t buf_phys_addr;
3421 u32 vaddr; 3423 u32 vaddr;
3422 3424
3423 /* Get buffer virtual address (indirect access) */ 3425 /* Get buffer virtual address (indirect access) */
3424 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 3426 buf_phys_addr = mvpp2_read(priv,
3427 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3425 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); 3428 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3429
3430 dma_unmap_single(dev, buf_phys_addr,
3431 bm_pool->buf_size, DMA_FROM_DEVICE);
3432
3426 if (!vaddr) 3433 if (!vaddr)
3427 break; 3434 break;
3428 dev_kfree_skb_any((struct sk_buff *)vaddr); 3435 dev_kfree_skb_any((struct sk_buff *)vaddr);
@@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3439{ 3446{
3440 u32 val; 3447 u32 val;
3441 3448
3442 mvpp2_bm_bufs_free(priv, bm_pool); 3449 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3443 if (bm_pool->buf_num) { 3450 if (bm_pool->buf_num) {
3444 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); 3451 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3445 return 0; 3452 return 0;
@@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3692 MVPP2_BM_LONG_BUF_NUM : 3699 MVPP2_BM_LONG_BUF_NUM :
3693 MVPP2_BM_SHORT_BUF_NUM; 3700 MVPP2_BM_SHORT_BUF_NUM;
3694 else 3701 else
3695 mvpp2_bm_bufs_free(port->priv, new_pool); 3702 mvpp2_bm_bufs_free(port->dev->dev.parent,
3703 port->priv, new_pool);
3696 3704
3697 new_pool->pkt_size = pkt_size; 3705 new_pool->pkt_size = pkt_size;
3698 3706
@@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3756 int pkt_size = MVPP2_RX_PKT_SIZE(mtu); 3764 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3757 3765
3758 /* Update BM pool with new buffer size */ 3766 /* Update BM pool with new buffer size */
3759 mvpp2_bm_bufs_free(port->priv, port_pool); 3767 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
3760 if (port_pool->buf_num) { 3768 if (port_pool->buf_num) {
3761 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); 3769 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3762 return -EIO; 3770 return -EIO;
@@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4401 4409
4402 mvpp2_txq_inc_get(txq_pcpu); 4410 mvpp2_txq_inc_get(txq_pcpu);
4403 4411
4404 if (!skb)
4405 continue;
4406
4407 dma_unmap_single(port->dev->dev.parent, buf_phys_addr, 4412 dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4408 skb_headlen(skb), DMA_TO_DEVICE); 4413 skb_headlen(skb), DMA_TO_DEVICE);
4414 if (!skb)
4415 continue;
4409 dev_kfree_skb_any(skb); 4416 dev_kfree_skb_any(skb);
4410 } 4417 }
4411} 4418}
@@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5092 struct mvpp2_rx_queue *rxq) 5099 struct mvpp2_rx_queue *rxq)
5093{ 5100{
5094 struct net_device *dev = port->dev; 5101 struct net_device *dev = port->dev;
5095 int rx_received, rx_filled, i; 5102 int rx_received;
5103 int rx_done = 0;
5096 u32 rcvd_pkts = 0; 5104 u32 rcvd_pkts = 0;
5097 u32 rcvd_bytes = 0; 5105 u32 rcvd_bytes = 0;
5098 5106
@@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5101 if (rx_todo > rx_received) 5109 if (rx_todo > rx_received)
5102 rx_todo = rx_received; 5110 rx_todo = rx_received;
5103 5111
5104 rx_filled = 0; 5112 while (rx_done < rx_todo) {
5105 for (i = 0; i < rx_todo; i++) {
5106 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 5113 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5107 struct mvpp2_bm_pool *bm_pool; 5114 struct mvpp2_bm_pool *bm_pool;
5108 struct sk_buff *skb; 5115 struct sk_buff *skb;
5116 dma_addr_t phys_addr;
5109 u32 bm, rx_status; 5117 u32 bm, rx_status;
5110 int pool, rx_bytes, err; 5118 int pool, rx_bytes, err;
5111 5119
5112 rx_filled++; 5120 rx_done++;
5113 rx_status = rx_desc->status; 5121 rx_status = rx_desc->status;
5114 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; 5122 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5123 phys_addr = rx_desc->buf_phys_addr;
5115 5124
5116 bm = mvpp2_bm_cookie_build(rx_desc); 5125 bm = mvpp2_bm_cookie_build(rx_desc);
5117 pool = mvpp2_bm_cookie_pool_get(bm); 5126 pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5128 * comprised by the RX descriptor. 5137 * comprised by the RX descriptor.
5129 */ 5138 */
5130 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 5139 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5140 err_drop_frame:
5131 dev->stats.rx_errors++; 5141 dev->stats.rx_errors++;
5132 mvpp2_rx_error(port, rx_desc); 5142 mvpp2_rx_error(port, rx_desc);
5143 /* Return the buffer to the pool */
5133 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, 5144 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5134 rx_desc->buf_cookie); 5145 rx_desc->buf_cookie);
5135 continue; 5146 continue;
@@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5137 5148
5138 skb = (struct sk_buff *)rx_desc->buf_cookie; 5149 skb = (struct sk_buff *)rx_desc->buf_cookie;
5139 5150
5151 err = mvpp2_rx_refill(port, bm_pool, bm, 0);
5152 if (err) {
5153 netdev_err(port->dev, "failed to refill BM pools\n");
5154 goto err_drop_frame;
5155 }
5156
5157 dma_unmap_single(dev->dev.parent, phys_addr,
5158 bm_pool->buf_size, DMA_FROM_DEVICE);
5159
5140 rcvd_pkts++; 5160 rcvd_pkts++;
5141 rcvd_bytes += rx_bytes; 5161 rcvd_bytes += rx_bytes;
5142 atomic_inc(&bm_pool->in_use); 5162 atomic_inc(&bm_pool->in_use);
@@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5147 mvpp2_rx_csum(port, rx_status, skb); 5167 mvpp2_rx_csum(port, rx_status, skb);
5148 5168
5149 napi_gro_receive(&port->napi, skb); 5169 napi_gro_receive(&port->napi, skb);
5150
5151 err = mvpp2_rx_refill(port, bm_pool, bm, 0);
5152 if (err) {
5153 netdev_err(port->dev, "failed to refill BM pools\n");
5154 rx_filled--;
5155 }
5156 } 5170 }
5157 5171
5158 if (rcvd_pkts) { 5172 if (rcvd_pkts) {
@@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5166 5180
5167 /* Update Rx queue management counters */ 5181 /* Update Rx queue management counters */
5168 wmb(); 5182 wmb();
5169 mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); 5183 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5170 5184
5171 return rx_todo; 5185 return rx_todo;
5172} 5186}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2177e56ed0be..d48d5793407d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
1010 if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && 1010 if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
1011 smp->method == IB_MGMT_METHOD_GET) || network_view) { 1011 smp->method == IB_MGMT_METHOD_GET) || network_view) {
1012 mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n", 1012 mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
1013 slave, smp->method, smp->mgmt_class, 1013 slave, smp->mgmt_class, smp->method,
1014 network_view ? "Network" : "Host", 1014 network_view ? "Network" : "Host",
1015 be16_to_cpu(smp->attr_id)); 1015 be16_to_cpu(smp->attr_id));
1016 return -EPERM; 1016 return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 6fec3e993d02..cad6c44df91c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4306,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4306 return -EOPNOTSUPP; 4306 return -EOPNOTSUPP;
4307 4307
4308 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 4308 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4309 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); 4309 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4310 if (ctrl->port <= 0) 4310 if (err <= 0)
4311 return -EINVAL; 4311 return -EINVAL;
4312 ctrl->port = err;
4312 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 4313 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4313 err = get_res(dev, slave, qpn, RES_QP, &rqp); 4314 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4314 if (err) { 4315 if (err) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index ac17d8669b1a..1292c360390c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -299,6 +299,7 @@ struct qed_hwfn {
299 299
300 /* Flag indicating whether interrupts are enabled or not*/ 300 /* Flag indicating whether interrupts are enabled or not*/
301 bool b_int_enabled; 301 bool b_int_enabled;
302 bool b_int_requested;
302 303
303 struct qed_mcp_info *mcp_info; 304 struct qed_mcp_info *mcp_info;
304 305
@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
491 u32 input_len, u8 *input_buf, 492 u32 input_len, u8 *input_buf,
492 u32 max_size, u8 *unzip_buf); 493 u32 max_size, u8 *unzip_buf);
493 494
495int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
496
494#define QED_ETH_INTERFACE_VERSION 300 497#define QED_ETH_INTERFACE_VERSION 300
495 498
496#endif /* _QED_H */ 499#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 803b190ccada..817bbd5476ff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1385,52 +1385,63 @@ err0:
1385 return rc; 1385 return rc;
1386} 1386}
1387 1387
1388static u32 qed_hw_bar_size(struct qed_dev *cdev, 1388static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
1389 u8 bar_id) 1389 u8 bar_id)
1390{ 1390{
1391 u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0); 1391 u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
1392 : PGLUE_B_REG_PF_BAR1_SIZE);
1393 u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
1392 1394
1393 return size / cdev->num_hwfns; 1395 /* Get the BAR size(in KB) from hardware given val */
1396 return 1 << (val + 15);
1394} 1397}
1395 1398
1396int qed_hw_prepare(struct qed_dev *cdev, 1399int qed_hw_prepare(struct qed_dev *cdev,
1397 int personality) 1400 int personality)
1398{ 1401{
1399 int rc, i; 1402 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1403 int rc;
1400 1404
1401 /* Store the precompiled init data ptrs */ 1405 /* Store the precompiled init data ptrs */
1402 qed_init_iro_array(cdev); 1406 qed_init_iro_array(cdev);
1403 1407
1404 /* Initialize the first hwfn - will learn number of hwfns */ 1408 /* Initialize the first hwfn - will learn number of hwfns */
1405 rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview, 1409 rc = qed_hw_prepare_single(p_hwfn,
1410 cdev->regview,
1406 cdev->doorbells, personality); 1411 cdev->doorbells, personality);
1407 if (rc) 1412 if (rc)
1408 return rc; 1413 return rc;
1409 1414
1410 personality = cdev->hwfns[0].hw_info.personality; 1415 personality = p_hwfn->hw_info.personality;
1411 1416
1412 /* Initialize the rest of the hwfns */ 1417 /* Initialize the rest of the hwfns */
1413 for (i = 1; i < cdev->num_hwfns; i++) { 1418 if (cdev->num_hwfns > 1) {
1414 void __iomem *p_regview, *p_doorbell; 1419 void __iomem *p_regview, *p_doorbell;
1420 u8 __iomem *addr;
1421
1422 /* adjust bar offset for second engine */
1423 addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
1424 p_regview = addr;
1415 1425
1416 p_regview = cdev->regview + 1426 /* adjust doorbell bar offset for second engine */
1417 i * qed_hw_bar_size(cdev, 0); 1427 addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
1418 p_doorbell = cdev->doorbells + 1428 p_doorbell = addr;
1419 i * qed_hw_bar_size(cdev, 1); 1429
1420 rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview, 1430 /* prepare second hw function */
1431 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1421 p_doorbell, personality); 1432 p_doorbell, personality);
1433
1434 /* in case of error, need to free the previously
1435 * initiliazed hwfn 0.
1436 */
1422 if (rc) { 1437 if (rc) {
1423 /* Cleanup previously initialized hwfns */ 1438 qed_init_free(p_hwfn);
1424 while (--i >= 0) { 1439 qed_mcp_free(p_hwfn);
1425 qed_init_free(&cdev->hwfns[i]); 1440 qed_hw_hwfn_free(p_hwfn);
1426 qed_mcp_free(&cdev->hwfns[i]);
1427 qed_hw_hwfn_free(&cdev->hwfns[i]);
1428 }
1429 return rc;
1430 } 1441 }
1431 } 1442 }
1432 1443
1433 return 0; 1444 return rc;
1434} 1445}
1435 1446
1436void qed_hw_remove(struct qed_dev *cdev) 1447void qed_hw_remove(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index de50e84902af..9cc9d62c1fec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
783 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 783 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
784} 784}
785 785
786void qed_int_igu_enable(struct qed_hwfn *p_hwfn, 786int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
787 struct qed_ptt *p_ptt, 787 enum qed_int_mode int_mode)
788 enum qed_int_mode int_mode)
789{ 788{
790 int i; 789 int rc, i;
791
792 p_hwfn->b_int_enabled = 1;
793 790
794 /* Mask non-link attentions */ 791 /* Mask non-link attentions */
795 for (i = 0; i < 9; i++) 792 for (i = 0; i < 9; i++)
796 qed_wr(p_hwfn, p_ptt, 793 qed_wr(p_hwfn, p_ptt,
797 MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); 794 MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
798 795
799 /* Enable interrupt Generation */
800 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
801
802 /* Configure AEU signal change to produce attentions for link */ 796 /* Configure AEU signal change to produce attentions for link */
803 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 797 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
804 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 798 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
808 802
809 /* Unmask AEU signals toward IGU */ 803 /* Unmask AEU signals toward IGU */
810 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 804 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
805 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
806 rc = qed_slowpath_irq_req(p_hwfn);
807 if (rc != 0) {
808 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
809 return -EINVAL;
810 }
811 p_hwfn->b_int_requested = true;
812 }
813 /* Enable interrupt Generation */
814 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
815 p_hwfn->b_int_enabled = 1;
816
817 return rc;
811} 818}
812 819
813void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, 820void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
1127 1134
1128 return info->igu_sb_cnt; 1135 return info->igu_sb_cnt;
1129} 1136}
1137
1138void qed_int_disable_post_isr_release(struct qed_dev *cdev)
1139{
1140 int i;
1141
1142 for_each_hwfn(cdev, i)
1143 cdev->hwfns[i].b_int_requested = false;
1144}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 16b57518e706..51e0b09a7f47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
169 int *p_iov_blks); 169 int *p_iov_blks);
170 170
171/** 171/**
172 * @file 172 * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
173 * release. The API need to be called after releasing all slowpath IRQs
174 * of the device.
175 *
176 * @param cdev
173 * 177 *
174 * @brief Interrupt handler
175 */ 178 */
179void qed_int_disable_post_isr_release(struct qed_dev *cdev);
176 180
177#define QED_CAU_DEF_RX_TIMER_RES 0 181#define QED_CAU_DEF_RX_TIMER_RES 0
178#define QED_CAU_DEF_TX_TIMER_RES 0 182#define QED_CAU_DEF_TX_TIMER_RES 0
@@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
366 * @param p_hwfn 370 * @param p_hwfn
367 * @param p_ptt 371 * @param p_ptt
368 * @param int_mode 372 * @param int_mode
373 *
374 * @return int
369 */ 375 */
370void qed_int_igu_enable(struct qed_hwfn *p_hwfn, 376int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
371 struct qed_ptt *p_ptt, 377 enum qed_int_mode int_mode);
372 enum qed_int_mode int_mode);
373 378
374/** 379/**
375 * @brief - Initialize CAU status block entry 380 * @brief - Initialize CAU status block entry
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 947c7af72b25..174f7341c5c3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
476 return rc; 476 return rc;
477} 477}
478 478
479static int qed_slowpath_irq_req(struct qed_dev *cdev) 479int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
480{ 480{
481 int i = 0, rc = 0; 481 struct qed_dev *cdev = hwfn->cdev;
482 int rc = 0;
483 u8 id;
482 484
483 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 485 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
484 /* Request all the slowpath MSI-X vectors */ 486 id = hwfn->my_id;
485 for (i = 0; i < cdev->num_hwfns; i++) { 487 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
486 snprintf(cdev->hwfns[i].name, NAME_SIZE, 488 id, cdev->pdev->bus->number,
487 "sp-%d-%02x:%02x.%02x", 489 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
488 i, cdev->pdev->bus->number, 490 rc = request_irq(cdev->int_params.msix_table[id].vector,
489 PCI_SLOT(cdev->pdev->devfn), 491 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
490 cdev->hwfns[i].abs_pf_id); 492 if (!rc)
491 493 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
492 rc = request_irq(cdev->int_params.msix_table[i].vector,
493 qed_msix_sp_int, 0,
494 cdev->hwfns[i].name,
495 cdev->hwfns[i].sp_dpc);
496 if (rc)
497 break;
498
499 DP_VERBOSE(&cdev->hwfns[i],
500 (NETIF_MSG_INTR | QED_MSG_SP),
501 "Requested slowpath MSI-X\n"); 494 "Requested slowpath MSI-X\n");
502 }
503
504 if (i != cdev->num_hwfns) {
505 /* Free already request MSI-X vectors */
506 for (i--; i >= 0; i--) {
507 unsigned int vec =
508 cdev->int_params.msix_table[i].vector;
509 synchronize_irq(vec);
510 free_irq(cdev->int_params.msix_table[i].vector,
511 cdev->hwfns[i].sp_dpc);
512 }
513 }
514 } else { 495 } else {
515 unsigned long flags = 0; 496 unsigned long flags = 0;
516 497
@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
534 515
535 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 516 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
536 for_each_hwfn(cdev, i) { 517 for_each_hwfn(cdev, i) {
518 if (!cdev->hwfns[i].b_int_requested)
519 break;
537 synchronize_irq(cdev->int_params.msix_table[i].vector); 520 synchronize_irq(cdev->int_params.msix_table[i].vector);
538 free_irq(cdev->int_params.msix_table[i].vector, 521 free_irq(cdev->int_params.msix_table[i].vector,
539 cdev->hwfns[i].sp_dpc); 522 cdev->hwfns[i].sp_dpc);
540 } 523 }
541 } else { 524 } else {
542 free_irq(cdev->pdev->irq, cdev); 525 if (QED_LEADING_HWFN(cdev)->b_int_requested)
526 free_irq(cdev->pdev->irq, cdev);
543 } 527 }
528 qed_int_disable_post_isr_release(cdev);
544} 529}
545 530
546static int qed_nic_stop(struct qed_dev *cdev) 531static int qed_nic_stop(struct qed_dev *cdev)
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev,
765 if (rc) 750 if (rc)
766 goto err1; 751 goto err1;
767 752
768 /* Request the slowpath IRQ */
769 rc = qed_slowpath_irq_req(cdev);
770 if (rc)
771 goto err2;
772
773 /* Allocate stream for unzipping */ 753 /* Allocate stream for unzipping */
774 rc = qed_alloc_stream_mem(cdev); 754 rc = qed_alloc_stream_mem(cdev);
775 if (rc) { 755 if (rc) {
776 DP_NOTICE(cdev, "Failed to allocate stream memory\n"); 756 DP_NOTICE(cdev, "Failed to allocate stream memory\n");
777 goto err3; 757 goto err2;
778 } 758 }
779 759
780 /* Start the slowpath */ 760 /* Start the slowpath */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 7a5ce5914ace..e8df12335a97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -363,4 +363,8 @@
363 0x7 << 0) 363 0x7 << 0)
364#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ 364#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
365 0 365 0
366#define PGLUE_B_REG_PF_BAR0_SIZE \
367 0x2aae60UL
368#define PGLUE_B_REG_PF_BAR1_SIZE \
369 0x2aae64UL
366#endif 370#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 31a1f1eb4f56..287fadfab52d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -124,8 +124,12 @@ struct qed_spq {
124 dma_addr_t p_phys; 124 dma_addr_t p_phys;
125 struct qed_spq_entry *p_virt; 125 struct qed_spq_entry *p_virt;
126 126
127 /* Used as index for completions (returns on EQ by FW) */ 127#define SPQ_RING_SIZE \
128 u16 echo_idx; 128 (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
129
130 /* Bitmap for handling out-of-order completions */
131 DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
132 u8 comp_bitmap_idx;
129 133
130 /* Statistics */ 134 /* Statistics */
131 u32 unlimited_pending_count; 135 u32 unlimited_pending_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7c0b8459666e..3dd548ab8df1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -112,8 +112,6 @@ static int
112qed_spq_fill_entry(struct qed_hwfn *p_hwfn, 112qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
113 struct qed_spq_entry *p_ent) 113 struct qed_spq_entry *p_ent)
114{ 114{
115 p_ent->elem.hdr.echo = 0;
116 p_hwfn->p_spq->echo_idx++;
117 p_ent->flags = 0; 115 p_ent->flags = 0;
118 116
119 switch (p_ent->comp_mode) { 117 switch (p_ent->comp_mode) {
@@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
195 struct qed_spq *p_spq, 193 struct qed_spq *p_spq,
196 struct qed_spq_entry *p_ent) 194 struct qed_spq_entry *p_ent)
197{ 195{
198 struct qed_chain *p_chain = &p_hwfn->p_spq->chain; 196 struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
197 u16 echo = qed_chain_get_prod_idx(p_chain);
199 struct slow_path_element *elem; 198 struct slow_path_element *elem;
200 struct core_db_data db; 199 struct core_db_data db;
201 200
201 p_ent->elem.hdr.echo = cpu_to_le16(echo);
202 elem = qed_chain_produce(p_chain); 202 elem = qed_chain_produce(p_chain);
203 if (!elem) { 203 if (!elem) {
204 DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); 204 DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
@@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
437 p_spq->comp_count = 0; 437 p_spq->comp_count = 0;
438 p_spq->comp_sent_count = 0; 438 p_spq->comp_sent_count = 0;
439 p_spq->unlimited_pending_count = 0; 439 p_spq->unlimited_pending_count = 0;
440 p_spq->echo_idx = 0; 440
441 bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
442 p_spq->comp_bitmap_idx = 0;
441 443
442 /* SPQ cid, cannot fail */ 444 /* SPQ cid, cannot fail */
443 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); 445 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
@@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
582 struct qed_spq *p_spq = p_hwfn->p_spq; 584 struct qed_spq *p_spq = p_hwfn->p_spq;
583 585
584 if (p_ent->queue == &p_spq->unlimited_pending) { 586 if (p_ent->queue == &p_spq->unlimited_pending) {
585 struct qed_spq_entry *p_en2;
586 587
587 if (list_empty(&p_spq->free_pool)) { 588 if (list_empty(&p_spq->free_pool)) {
588 list_add_tail(&p_ent->list, &p_spq->unlimited_pending); 589 list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
589 p_spq->unlimited_pending_count++; 590 p_spq->unlimited_pending_count++;
590 591
591 return 0; 592 return 0;
592 } 593 } else {
594 struct qed_spq_entry *p_en2;
593 595
594 p_en2 = list_first_entry(&p_spq->free_pool, 596 p_en2 = list_first_entry(&p_spq->free_pool,
595 struct qed_spq_entry, 597 struct qed_spq_entry,
596 list); 598 list);
597 list_del(&p_en2->list); 599 list_del(&p_en2->list);
600
601 /* Copy the ring element physical pointer to the new
602 * entry, since we are about to override the entire ring
603 * entry and don't want to lose the pointer.
604 */
605 p_ent->elem.data_ptr = p_en2->elem.data_ptr;
598 606
599 /* Strcut assignment */ 607 *p_en2 = *p_ent;
600 *p_en2 = *p_ent;
601 608
602 kfree(p_ent); 609 kfree(p_ent);
603 610
604 p_ent = p_en2; 611 p_ent = p_en2;
612 }
605 } 613 }
606 614
607 /* entry is to be placed in 'pending' queue */ 615 /* entry is to be placed in 'pending' queue */
@@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
777 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, 785 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
778 list) { 786 list) {
779 if (p_ent->elem.hdr.echo == echo) { 787 if (p_ent->elem.hdr.echo == echo) {
788 u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
789
780 list_del(&p_ent->list); 790 list_del(&p_ent->list);
781 791
782 qed_chain_return_produced(&p_spq->chain); 792 /* Avoid overriding of SPQ entries when getting
793 * out-of-order completions, by marking the completions
794 * in a bitmap and increasing the chain consumer only
795 * for the first successive completed entries.
796 */
797 bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
798
799 while (test_bit(p_spq->comp_bitmap_idx,
800 p_spq->p_comp_bitmap)) {
801 bitmap_clear(p_spq->p_comp_bitmap,
802 p_spq->comp_bitmap_idx,
803 SPQ_RING_SIZE);
804 p_spq->comp_bitmap_idx++;
805 qed_chain_return_produced(&p_spq->chain);
806 }
807
783 p_spq->comp_count++; 808 p_spq->comp_count++;
784 found = p_ent; 809 found = p_ent;
785 break; 810 break;
786 } 811 }
812
813 /* This is relatively uncommon - depends on scenarios
814 * which have mutliple per-PF sent ramrods.
815 */
816 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
817 "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
818 le16_to_cpu(echo),
819 le16_to_cpu(p_ent->elem.hdr.echo));
787 } 820 }
788 821
789 /* Release lock before callback, as callback may post 822 /* Release lock before callback, as callback may post
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index be7d7a62cc0d..b1a452f291ee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -246,7 +246,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
246 u32 state; 246 u32 state;
247 247
248 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); 248 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
249 while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { 249 while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
250 idc->vnic_wait_limit--;
250 msleep(1000); 251 msleep(1000);
251 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); 252 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
252 } 253 }
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 02b7115b6aaa..997976426799 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
4211 4211
4212 /* Wait for an outstanding reset to complete. */ 4212 /* Wait for an outstanding reset to complete. */
4213 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4213 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4214 int i = 3; 4214 int i = 4;
4215 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4215
4216 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4216 netif_err(qdev, ifup, qdev->ndev, 4217 netif_err(qdev, ifup, qdev->ndev,
4217 "Waiting for adapter UP...\n"); 4218 "Waiting for adapter UP...\n");
4218 ssleep(1); 4219 ssleep(1);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ddb2c6c6ec94..689a4a5c8dcf 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
736 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", 736 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
737 jiffies, jiffies - dev->trans_start); 737 jiffies, jiffies - dev->trans_start);
738 qca->net_dev->stats.tx_errors++; 738 qca->net_dev->stats.tx_errors++;
739 /* wake the queue if there is room */ 739 /* Trigger tx queue flush and QCA7000 reset */
740 if (qcaspi_tx_ring_has_space(&qca->txr)) 740 qca->sync = QCASPI_SYNC_UNKNOWN;
741 netif_wake_queue(dev);
742} 741}
743 742
744static int 743static int
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ed5da4d47668..467d41698fd5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -905,6 +905,9 @@ static int ravb_phy_init(struct net_device *ndev)
905 netdev_info(ndev, "limited PHY to 100Mbit/s\n"); 905 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
906 } 906 }
907 907
908 /* 10BASE is not supported */
909 phydev->supported &= ~PHY_10BT_FEATURES;
910
908 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", 911 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
909 phydev->addr, phydev->irq, phydev->drv->name); 912 phydev->addr, phydev->irq, phydev->drv->name);
910 913
@@ -1037,7 +1040,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1037 "rx_queue_1_mcast_packets", 1040 "rx_queue_1_mcast_packets",
1038 "rx_queue_1_errors", 1041 "rx_queue_1_errors",
1039 "rx_queue_1_crc_errors", 1042 "rx_queue_1_crc_errors",
1040 "rx_queue_1_frame_errors_", 1043 "rx_queue_1_frame_errors",
1041 "rx_queue_1_length_errors", 1044 "rx_queue_1_length_errors",
1042 "rx_queue_1_missed_errors", 1045 "rx_queue_1_missed_errors",
1043 "rx_queue_1_over_errors", 1046 "rx_queue_1_over_errors",
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e7bab7909ed9..a0eaf50499a2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -52,6 +52,8 @@
52 NETIF_MSG_RX_ERR| \ 52 NETIF_MSG_RX_ERR| \
53 NETIF_MSG_TX_ERR) 53 NETIF_MSG_TX_ERR)
54 54
55#define SH_ETH_OFFSET_INVALID ((u16)~0)
56
55#define SH_ETH_OFFSET_DEFAULTS \ 57#define SH_ETH_OFFSET_DEFAULTS \
56 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID 58 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
57 59
@@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
404static void sh_eth_rcv_snd_disable(struct net_device *ndev); 406static void sh_eth_rcv_snd_disable(struct net_device *ndev);
405static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); 407static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
406 408
409static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
410{
411 struct sh_eth_private *mdp = netdev_priv(ndev);
412 u16 offset = mdp->reg_offset[enum_index];
413
414 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
415 return;
416
417 iowrite32(data, mdp->addr + offset);
418}
419
420static u32 sh_eth_read(struct net_device *ndev, int enum_index)
421{
422 struct sh_eth_private *mdp = netdev_priv(ndev);
423 u16 offset = mdp->reg_offset[enum_index];
424
425 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
426 return ~0U;
427
428 return ioread32(mdp->addr + offset);
429}
430
407static bool sh_eth_is_gether(struct sh_eth_private *mdp) 431static bool sh_eth_is_gether(struct sh_eth_private *mdp)
408{ 432{
409 return mdp->reg_offset == sh_eth_offset_gigabit; 433 return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1172,7 +1196,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1172 break; 1196 break;
1173 } 1197 }
1174 mdp->rx_skbuff[i] = skb; 1198 mdp->rx_skbuff[i] = skb;
1175 rxdesc->addr = dma_addr; 1199 rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1176 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1200 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1177 1201
1178 /* Rx descriptor address set */ 1202 /* Rx descriptor address set */
@@ -1403,7 +1427,8 @@ static int sh_eth_txfree(struct net_device *ndev)
1403 entry, edmac_to_cpu(mdp, txdesc->status)); 1427 entry, edmac_to_cpu(mdp, txdesc->status));
1404 /* Free the original skb. */ 1428 /* Free the original skb. */
1405 if (mdp->tx_skbuff[entry]) { 1429 if (mdp->tx_skbuff[entry]) {
1406 dma_unmap_single(&ndev->dev, txdesc->addr, 1430 dma_unmap_single(&ndev->dev,
1431 edmac_to_cpu(mdp, txdesc->addr),
1407 txdesc->buffer_length, DMA_TO_DEVICE); 1432 txdesc->buffer_length, DMA_TO_DEVICE);
1408 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1433 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1409 mdp->tx_skbuff[entry] = NULL; 1434 mdp->tx_skbuff[entry] = NULL;
@@ -1462,6 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1462 if (mdp->cd->shift_rd0) 1487 if (mdp->cd->shift_rd0)
1463 desc_status >>= 16; 1488 desc_status >>= 16;
1464 1489
1490 skb = mdp->rx_skbuff[entry];
1465 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1491 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1466 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1492 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1467 ndev->stats.rx_errors++; 1493 ndev->stats.rx_errors++;
@@ -1477,16 +1503,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1477 ndev->stats.rx_missed_errors++; 1503 ndev->stats.rx_missed_errors++;
1478 if (desc_status & RD_RFS10) 1504 if (desc_status & RD_RFS10)
1479 ndev->stats.rx_over_errors++; 1505 ndev->stats.rx_over_errors++;
1480 } else { 1506 } else if (skb) {
1507 dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
1481 if (!mdp->cd->hw_swap) 1508 if (!mdp->cd->hw_swap)
1482 sh_eth_soft_swap( 1509 sh_eth_soft_swap(
1483 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1510 phys_to_virt(ALIGN(dma_addr, 4)),
1484 pkt_len + 2); 1511 pkt_len + 2);
1485 skb = mdp->rx_skbuff[entry];
1486 mdp->rx_skbuff[entry] = NULL; 1512 mdp->rx_skbuff[entry] = NULL;
1487 if (mdp->cd->rpadir) 1513 if (mdp->cd->rpadir)
1488 skb_reserve(skb, NET_IP_ALIGN); 1514 skb_reserve(skb, NET_IP_ALIGN);
1489 dma_unmap_single(&ndev->dev, rxdesc->addr, 1515 dma_unmap_single(&ndev->dev, dma_addr,
1490 ALIGN(mdp->rx_buf_sz, 32), 1516 ALIGN(mdp->rx_buf_sz, 32),
1491 DMA_FROM_DEVICE); 1517 DMA_FROM_DEVICE);
1492 skb_put(skb, pkt_len); 1518 skb_put(skb, pkt_len);
@@ -1523,7 +1549,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 mdp->rx_skbuff[entry] = skb; 1549 mdp->rx_skbuff[entry] = skb;
1524 1550
1525 skb_checksum_none_assert(skb); 1551 skb_checksum_none_assert(skb);
1526 rxdesc->addr = dma_addr; 1552 rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1527 } 1553 }
1528 dma_wmb(); /* RACT bit must be set after all the above writes */ 1554 dma_wmb(); /* RACT bit must be set after all the above writes */
1529 if (entry >= mdp->num_rx_ring - 1) 1555 if (entry >= mdp->num_rx_ring - 1)
@@ -2331,8 +2357,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2331 /* Free all the skbuffs in the Rx queue. */ 2357 /* Free all the skbuffs in the Rx queue. */
2332 for (i = 0; i < mdp->num_rx_ring; i++) { 2358 for (i = 0; i < mdp->num_rx_ring; i++) {
2333 rxdesc = &mdp->rx_ring[i]; 2359 rxdesc = &mdp->rx_ring[i];
2334 rxdesc->status = 0; 2360 rxdesc->status = cpu_to_edmac(mdp, 0);
2335 rxdesc->addr = 0xBADF00D0; 2361 rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
2336 dev_kfree_skb(mdp->rx_skbuff[i]); 2362 dev_kfree_skb(mdp->rx_skbuff[i]);
2337 mdp->rx_skbuff[i] = NULL; 2363 mdp->rx_skbuff[i] = NULL;
2338 } 2364 }
@@ -2350,6 +2376,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2350{ 2376{
2351 struct sh_eth_private *mdp = netdev_priv(ndev); 2377 struct sh_eth_private *mdp = netdev_priv(ndev);
2352 struct sh_eth_txdesc *txdesc; 2378 struct sh_eth_txdesc *txdesc;
2379 dma_addr_t dma_addr;
2353 u32 entry; 2380 u32 entry;
2354 unsigned long flags; 2381 unsigned long flags;
2355 2382
@@ -2372,14 +2399,14 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2372 txdesc = &mdp->tx_ring[entry]; 2399 txdesc = &mdp->tx_ring[entry];
2373 /* soft swap. */ 2400 /* soft swap. */
2374 if (!mdp->cd->hw_swap) 2401 if (!mdp->cd->hw_swap)
2375 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 2402 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2376 skb->len + 2); 2403 dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2377 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2404 DMA_TO_DEVICE);
2378 DMA_TO_DEVICE); 2405 if (dma_mapping_error(&ndev->dev, dma_addr)) {
2379 if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
2380 kfree_skb(skb); 2406 kfree_skb(skb);
2381 return NETDEV_TX_OK; 2407 return NETDEV_TX_OK;
2382 } 2408 }
2409 txdesc->addr = cpu_to_edmac(mdp, dma_addr);
2383 txdesc->buffer_length = skb->len; 2410 txdesc->buffer_length = skb->len;
2384 2411
2385 dma_wmb(); /* TACT bit must be set after all the above writes */ 2412 dma_wmb(); /* TACT bit must be set after all the above writes */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 50382b1c9ddc..26ad1cf0bcf1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -546,31 +546,6 @@ static inline void sh_eth_soft_swap(char *src, int len)
546#endif 546#endif
547} 547}
548 548
549#define SH_ETH_OFFSET_INVALID ((u16) ~0)
550
551static inline void sh_eth_write(struct net_device *ndev, u32 data,
552 int enum_index)
553{
554 struct sh_eth_private *mdp = netdev_priv(ndev);
555 u16 offset = mdp->reg_offset[enum_index];
556
557 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
558 return;
559
560 iowrite32(data, mdp->addr + offset);
561}
562
563static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
564{
565 struct sh_eth_private *mdp = netdev_priv(ndev);
566 u16 offset = mdp->reg_offset[enum_index];
567
568 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
569 return ~0U;
570
571 return ioread32(mdp->addr + offset);
572}
573
574static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, 549static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
575 int enum_index) 550 int enum_index)
576{ 551{
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index bc6d21b471be..e6a084a6be12 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3299,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3299 3299
3300 new_spec.priority = EFX_FILTER_PRI_AUTO; 3300 new_spec.priority = EFX_FILTER_PRI_AUTO;
3301 new_spec.flags = (EFX_FILTER_FLAG_RX | 3301 new_spec.flags = (EFX_FILTER_FLAG_RX |
3302 EFX_FILTER_FLAG_RX_RSS); 3302 (efx_rss_enabled(efx) ?
3303 EFX_FILTER_FLAG_RX_RSS : 0));
3303 new_spec.dmaq_id = 0; 3304 new_spec.dmaq_id = 0;
3304 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; 3305 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3305 rc = efx_ef10_filter_push(efx, &new_spec, 3306 rc = efx_ef10_filter_push(efx, &new_spec,
@@ -3921,6 +3922,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3921{ 3922{
3922 struct efx_ef10_filter_table *table = efx->filter_state; 3923 struct efx_ef10_filter_table *table = efx->filter_state;
3923 struct efx_ef10_dev_addr *addr_list; 3924 struct efx_ef10_dev_addr *addr_list;
3925 enum efx_filter_flags filter_flags;
3924 struct efx_filter_spec spec; 3926 struct efx_filter_spec spec;
3925 u8 baddr[ETH_ALEN]; 3927 u8 baddr[ETH_ALEN];
3926 unsigned int i, j; 3928 unsigned int i, j;
@@ -3935,11 +3937,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3935 addr_count = table->dev_uc_count; 3937 addr_count = table->dev_uc_count;
3936 } 3938 }
3937 3939
3940 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
3941
3938 /* Insert/renew filters */ 3942 /* Insert/renew filters */
3939 for (i = 0; i < addr_count; i++) { 3943 for (i = 0; i < addr_count; i++) {
3940 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3944 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3941 EFX_FILTER_FLAG_RX_RSS,
3942 0);
3943 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 3945 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3944 addr_list[i].addr); 3946 addr_list[i].addr);
3945 rc = efx_ef10_filter_insert(efx, &spec, true); 3947 rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -3968,9 +3970,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3968 3970
3969 if (multicast && rollback) { 3971 if (multicast && rollback) {
3970 /* Also need an Ethernet broadcast filter */ 3972 /* Also need an Ethernet broadcast filter */
3971 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3973 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3972 EFX_FILTER_FLAG_RX_RSS,
3973 0);
3974 eth_broadcast_addr(baddr); 3974 eth_broadcast_addr(baddr);
3975 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); 3975 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
3976 rc = efx_ef10_filter_insert(efx, &spec, true); 3976 rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -4000,13 +4000,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4000{ 4000{
4001 struct efx_ef10_filter_table *table = efx->filter_state; 4001 struct efx_ef10_filter_table *table = efx->filter_state;
4002 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4002 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4003 enum efx_filter_flags filter_flags;
4003 struct efx_filter_spec spec; 4004 struct efx_filter_spec spec;
4004 u8 baddr[ETH_ALEN]; 4005 u8 baddr[ETH_ALEN];
4005 int rc; 4006 int rc;
4006 4007
4007 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4008 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4008 EFX_FILTER_FLAG_RX_RSS, 4009
4009 0); 4010 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4010 4011
4011 if (multicast) 4012 if (multicast)
4012 efx_filter_set_mc_def(&spec); 4013 efx_filter_set_mc_def(&spec);
@@ -4023,8 +4024,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4023 if (!nic_data->workaround_26807) { 4024 if (!nic_data->workaround_26807) {
4024 /* Also need an Ethernet broadcast filter */ 4025 /* Also need an Ethernet broadcast filter */
4025 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4026 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
4026 EFX_FILTER_FLAG_RX_RSS, 4027 filter_flags, 0);
4027 0);
4028 eth_broadcast_addr(baddr); 4028 eth_broadcast_addr(baddr);
4029 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 4029 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
4030 baddr); 4030 baddr);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 1aaf76c1ace8..10827476bc0b 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
76#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ 76#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
77 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) 77 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
78 78
79static inline bool efx_rss_enabled(struct efx_nic *efx)
80{
81 return efx->rss_spread > 1;
82}
83
79/* Filters */ 84/* Filters */
80 85
81void efx_mac_reconfigure(struct efx_nic *efx); 86void efx_mac_reconfigure(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5a1c5a8f278a..133e9e35be9e 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2242 */ 2242 */
2243 spec->priority = EFX_FILTER_PRI_AUTO; 2243 spec->priority = EFX_FILTER_PRI_AUTO;
2244 spec->flags = (EFX_FILTER_FLAG_RX | 2244 spec->flags = (EFX_FILTER_FLAG_RX |
2245 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | 2245 (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
2246 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); 2246 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2247 spec->dmaq_id = 0; 2247 spec->dmaq_id = 0;
2248} 2248}
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 3d5ee3259885..194f67d9f3bf 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
418 418
419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN); 419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); 420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
421 while (tries--) { 421 while (--tries) {
422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); 422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) 423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
424 break; 424 break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 52b8ed9bd87c..adff46375a32 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
153 if (ret) 153 if (ret)
154 return ret; 154 return ret;
155 155
156 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 156 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
157 if (ret)
158 sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
159
160 return ret;
157} 161}
158 162
159static const struct of_device_id sun7i_dwmac_match[] = { 163static const struct of_device_id sun7i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3c6549aee11d..a5b869eb4678 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3046,8 +3046,6 @@ int stmmac_suspend(struct net_device *ndev)
3046 priv->hw->dma->stop_tx(priv->ioaddr); 3046 priv->hw->dma->stop_tx(priv->ioaddr);
3047 priv->hw->dma->stop_rx(priv->ioaddr); 3047 priv->hw->dma->stop_rx(priv->ioaddr);
3048 3048
3049 stmmac_clear_descriptors(priv);
3050
3051 /* Enable Power down mode by programming the PMT regs */ 3049 /* Enable Power down mode by programming the PMT regs */
3052 if (device_may_wakeup(priv->device)) { 3050 if (device_may_wakeup(priv->device)) {
3053 priv->hw->mac->pmt(priv->hw, priv->wolopts); 3051 priv->hw->mac->pmt(priv->hw, priv->wolopts);
@@ -3105,7 +3103,12 @@ int stmmac_resume(struct net_device *ndev)
3105 3103
3106 netif_device_attach(ndev); 3104 netif_device_attach(ndev);
3107 3105
3108 init_dma_desc_rings(ndev, GFP_ATOMIC); 3106 priv->cur_rx = 0;
3107 priv->dirty_rx = 0;
3108 priv->dirty_tx = 0;
3109 priv->cur_tx = 0;
3110 stmmac_clear_descriptors(priv);
3111
3109 stmmac_hw_setup(ndev, false); 3112 stmmac_hw_setup(ndev, false);
3110 stmmac_init_tx_coalesce(priv); 3113 stmmac_init_tx_coalesce(priv);
3111 stmmac_set_rx_mode(ndev); 3114 stmmac_set_rx_mode(ndev);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de5c30c9f059..c2b79f5d1c89 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -967,8 +967,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
967 err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, 967 err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
968 &fl6.saddr, &fl6.daddr, prio, ttl, 968 &fl6.saddr, &fl6.daddr, prio, ttl,
969 sport, geneve->dst_port, !udp_csum); 969 sport, geneve->dst_port, !udp_csum);
970
971 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
972 return NETDEV_TX_OK; 970 return NETDEV_TX_OK;
973 971
974tx_error: 972tx_error:
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 908e8d486342..7f8e7662e28c 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -149,9 +149,14 @@ int mdio_mux_init(struct device *dev,
149 } 149 }
150 cb->bus_number = v; 150 cb->bus_number = v;
151 cb->parent = pb; 151 cb->parent = pb;
152
152 cb->mii_bus = mdiobus_alloc(); 153 cb->mii_bus = mdiobus_alloc();
154 if (!cb->mii_bus) {
155 ret_val = -ENOMEM;
156 of_node_put(child_bus_node);
157 break;
158 }
153 cb->mii_bus->priv = cb; 159 cb->mii_bus->priv = cb;
154
155 cb->mii_bus->irq = cb->phy_irq; 160 cb->mii_bus->irq = cb->phy_irq;
156 cb->mii_bus->name = "mdio_mux"; 161 cb->mii_bus->name = "mdio_mux";
157 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", 162 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf6312fafea5..e13ad6cdcc22 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,9 +339,18 @@ static int ksz9021_config_init(struct phy_device *phydev)
339{ 339{
340 const struct device *dev = &phydev->dev; 340 const struct device *dev = &phydev->dev;
341 const struct device_node *of_node = dev->of_node; 341 const struct device_node *of_node = dev->of_node;
342 const struct device *dev_walker;
342 343
343 if (!of_node && dev->parent->of_node) 344 /* The Micrel driver has a deprecated option to place phy OF
344 of_node = dev->parent->of_node; 345 * properties in the MAC node. Walk up the tree of devices to
346 * find a device with an OF node.
347 */
348 dev_walker = &phydev->dev;
349 do {
350 of_node = dev_walker->of_node;
351 dev_walker = dev_walker->parent;
352
353 } while (!of_node && dev_walker);
345 354
346 if (of_node) { 355 if (of_node) {
347 ksz9021_load_values_from_of(phydev, of_node, 356 ksz9021_load_values_from_of(phydev, of_node,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 5e0b43283bce..0a37f840fcc5 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -568,6 +568,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
568 sk->sk_family = PF_PPPOX; 568 sk->sk_family = PF_PPPOX;
569 sk->sk_protocol = PX_PROTO_OE; 569 sk->sk_protocol = PX_PROTO_OE;
570 570
571 INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
572 pppoe_unbind_sock_work);
573
571 return 0; 574 return 0;
572} 575}
573 576
@@ -632,8 +635,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
632 635
633 lock_sock(sk); 636 lock_sock(sk);
634 637
635 INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
636
637 error = -EINVAL; 638 error = -EINVAL;
638 if (sp->sa_protocol != PX_PROTO_OE) 639 if (sp->sa_protocol != PX_PROTO_OE)
639 goto end; 640 goto end;
@@ -663,8 +664,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
663 po->pppoe_dev = NULL; 664 po->pppoe_dev = NULL;
664 } 665 }
665 666
666 memset(sk_pppox(po) + 1, 0, 667 po->pppoe_ifindex = 0;
667 sizeof(struct pppox_sock) - sizeof(struct sock)); 668 memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
669 memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
670 memset(&po->chan, 0, sizeof(po->chan));
671 po->next = NULL;
672 po->num = 0;
673
668 sk->sk_state = PPPOX_NONE; 674 sk->sk_state = PPPOX_NONE;
669 } 675 }
670 676
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index fc69e41d0950..597c53e0a2ec 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
419 struct pptp_opt *opt = &po->proto.pptp; 419 struct pptp_opt *opt = &po->proto.pptp;
420 int error = 0; 420 int error = 0;
421 421
422 if (sockaddr_len < sizeof(struct sockaddr_pppox))
423 return -EINVAL;
424
422 lock_sock(sk); 425 lock_sock(sk);
423 426
424 opt->src_addr = sp->sa_addr.pptp; 427 opt->src_addr = sp->sa_addr.pptp;
@@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
440 struct flowi4 fl4; 443 struct flowi4 fl4;
441 int error = 0; 444 int error = 0;
442 445
446 if (sockaddr_len < sizeof(struct sockaddr_pppox))
447 return -EINVAL;
448
443 if (sp->sa_protocol != PX_PROTO_PPTP) 449 if (sp->sa_protocol != PX_PROTO_PPTP)
444 return -EINVAL; 450 return -EINVAL;
445 451
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bbde9884ab8a..8973abdec9f6 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
159 goto err; 159 goto err;
160 160
161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); 161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data);
162 if (ret) 162 if (ret)
163 goto err; 163 goto err;
164 164
@@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = {
582 .tx_fixup = cdc_mbim_tx_fixup, 582 .tx_fixup = cdc_mbim_tx_fixup,
583}; 583};
584 584
585/* The spefication explicitly allows NDPs to be placed anywhere in the
586 * frame, but some devices fail unless the NDP is placed after the IP
587 * packets. Using the CDC_NCM_FLAG_NDP_TO_END flags to force this
588 * behaviour.
589 *
590 * Note: The current implementation of this feature restricts each NTB
591 * to a single NDP, implying that multiplexed sessions cannot share an
592 * NTB. This might affect performace for multiplexed sessions.
593 */
594static const struct driver_info cdc_mbim_info_ndp_to_end = {
595 .description = "CDC MBIM",
596 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
597 .bind = cdc_mbim_bind,
598 .unbind = cdc_mbim_unbind,
599 .manage_power = cdc_mbim_manage_power,
600 .rx_fixup = cdc_mbim_rx_fixup,
601 .tx_fixup = cdc_mbim_tx_fixup,
602 .data = CDC_NCM_FLAG_NDP_TO_END,
603};
604
585static const struct usb_device_id mbim_devs[] = { 605static const struct usb_device_id mbim_devs[] = {
586 /* This duplicate NCM entry is intentional. MBIM devices can 606 /* This duplicate NCM entry is intentional. MBIM devices can
587 * be disguised as NCM by default, and this is necessary to 607 * be disguised as NCM by default, and this is necessary to
@@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = {
597 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
598 .driver_info = (unsigned long)&cdc_mbim_info, 618 .driver_info = (unsigned long)&cdc_mbim_info,
599 }, 619 },
620 /* Huawei E3372 fails unless NDP comes after the IP packets */
621 { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
622 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
623 },
600 /* default entry */ 624 /* default entry */
601 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 625 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
602 .driver_info = (unsigned long)&cdc_mbim_info_zlp, 626 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 3b1ba8237768..1e9843a41168 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -955,10 +955,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
955 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and 955 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
956 * the wNdpIndex field in the header is actually not consistent with reality. It will be later. 956 * the wNdpIndex field in the header is actually not consistent with reality. It will be later.
957 */ 957 */
958 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 958 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
959 if (ctx->delayed_ndp16->dwSignature == sign) 959 if (ctx->delayed_ndp16->dwSignature == sign)
960 return ctx->delayed_ndp16; 960 return ctx->delayed_ndp16;
961 961
962 /* We can only push a single NDP to the end. Return
963 * NULL to send what we've already got and queue this
964 * skb for later.
965 */
966 else if (ctx->delayed_ndp16->dwSignature)
967 return NULL;
968 }
969
962 /* follow the chain of NDPs, looking for a match */ 970 /* follow the chain of NDPs, looking for a match */
963 while (ndpoffset) { 971 while (ndpoffset) {
964 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); 972 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d9427ca3dba7..2e32c41536ae 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3067,17 +3067,6 @@ static int rtl8152_open(struct net_device *netdev)
3067 3067
3068 mutex_lock(&tp->control); 3068 mutex_lock(&tp->control);
3069 3069
3070 /* The WORK_ENABLE may be set when autoresume occurs */
3071 if (test_bit(WORK_ENABLE, &tp->flags)) {
3072 clear_bit(WORK_ENABLE, &tp->flags);
3073 usb_kill_urb(tp->intr_urb);
3074 cancel_delayed_work_sync(&tp->schedule);
3075
3076 /* disable the tx/rx, if the workqueue has enabled them. */
3077 if (netif_carrier_ok(netdev))
3078 tp->rtl_ops.disable(tp);
3079 }
3080
3081 tp->rtl_ops.up(tp); 3070 tp->rtl_ops.up(tp);
3082 3071
3083 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3072 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3124,12 +3113,6 @@ static int rtl8152_close(struct net_device *netdev)
3124 } else { 3113 } else {
3125 mutex_lock(&tp->control); 3114 mutex_lock(&tp->control);
3126 3115
3127 /* The autosuspend may have been enabled and wouldn't
3128 * be disable when autoresume occurs, because the
3129 * netif_running() would be false.
3130 */
3131 rtl_runtime_suspend_enable(tp, false);
3132
3133 tp->rtl_ops.down(tp); 3116 tp->rtl_ops.down(tp);
3134 3117
3135 mutex_unlock(&tp->control); 3118 mutex_unlock(&tp->control);
@@ -3512,7 +3495,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3512 netif_device_attach(tp->netdev); 3495 netif_device_attach(tp->netdev);
3513 } 3496 }
3514 3497
3515 if (netif_running(tp->netdev)) { 3498 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3516 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3499 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3517 rtl_runtime_suspend_enable(tp, false); 3500 rtl_runtime_suspend_enable(tp, false);
3518 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3501 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
@@ -3532,6 +3515,8 @@ static int rtl8152_resume(struct usb_interface *intf)
3532 } 3515 }
3533 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3516 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3534 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3517 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3518 if (tp->netdev->flags & IFF_UP)
3519 rtl_runtime_suspend_enable(tp, false);
3535 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3520 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3536 } 3521 }
3537 3522
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d8838dedb7a4..f94ab786088f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -140,6 +140,12 @@ struct virtnet_info {
140 140
141 /* CPU hot plug notifier */ 141 /* CPU hot plug notifier */
142 struct notifier_block nb; 142 struct notifier_block nb;
143
144 /* Control VQ buffers: protected by the rtnl lock */
145 struct virtio_net_ctrl_hdr ctrl_hdr;
146 virtio_net_ctrl_ack ctrl_status;
147 u8 ctrl_promisc;
148 u8 ctrl_allmulti;
143}; 149};
144 150
145struct padded_vnet_hdr { 151struct padded_vnet_hdr {
@@ -976,31 +982,30 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
976 struct scatterlist *out) 982 struct scatterlist *out)
977{ 983{
978 struct scatterlist *sgs[4], hdr, stat; 984 struct scatterlist *sgs[4], hdr, stat;
979 struct virtio_net_ctrl_hdr ctrl;
980 virtio_net_ctrl_ack status = ~0;
981 unsigned out_num = 0, tmp; 985 unsigned out_num = 0, tmp;
982 986
983 /* Caller should know better */ 987 /* Caller should know better */
984 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 988 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
985 989
986 ctrl.class = class; 990 vi->ctrl_status = ~0;
987 ctrl.cmd = cmd; 991 vi->ctrl_hdr.class = class;
992 vi->ctrl_hdr.cmd = cmd;
988 /* Add header */ 993 /* Add header */
989 sg_init_one(&hdr, &ctrl, sizeof(ctrl)); 994 sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
990 sgs[out_num++] = &hdr; 995 sgs[out_num++] = &hdr;
991 996
992 if (out) 997 if (out)
993 sgs[out_num++] = out; 998 sgs[out_num++] = out;
994 999
995 /* Add return status. */ 1000 /* Add return status. */
996 sg_init_one(&stat, &status, sizeof(status)); 1001 sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
997 sgs[out_num] = &stat; 1002 sgs[out_num] = &stat;
998 1003
999 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1004 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1000 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 1005 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1001 1006
1002 if (unlikely(!virtqueue_kick(vi->cvq))) 1007 if (unlikely(!virtqueue_kick(vi->cvq)))
1003 return status == VIRTIO_NET_OK; 1008 return vi->ctrl_status == VIRTIO_NET_OK;
1004 1009
1005 /* Spin for a response, the kick causes an ioport write, trapping 1010 /* Spin for a response, the kick causes an ioport write, trapping
1006 * into the hypervisor, so the request should be handled immediately. 1011 * into the hypervisor, so the request should be handled immediately.
@@ -1009,7 +1014,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1009 !virtqueue_is_broken(vi->cvq)) 1014 !virtqueue_is_broken(vi->cvq))
1010 cpu_relax(); 1015 cpu_relax();
1011 1016
1012 return status == VIRTIO_NET_OK; 1017 return vi->ctrl_status == VIRTIO_NET_OK;
1013} 1018}
1014 1019
1015static int virtnet_set_mac_address(struct net_device *dev, void *p) 1020static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1151,7 +1156,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1151{ 1156{
1152 struct virtnet_info *vi = netdev_priv(dev); 1157 struct virtnet_info *vi = netdev_priv(dev);
1153 struct scatterlist sg[2]; 1158 struct scatterlist sg[2];
1154 u8 promisc, allmulti;
1155 struct virtio_net_ctrl_mac *mac_data; 1159 struct virtio_net_ctrl_mac *mac_data;
1156 struct netdev_hw_addr *ha; 1160 struct netdev_hw_addr *ha;
1157 int uc_count; 1161 int uc_count;
@@ -1163,22 +1167,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1163 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 1167 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1164 return; 1168 return;
1165 1169
1166 promisc = ((dev->flags & IFF_PROMISC) != 0); 1170 vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
1167 allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 1171 vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1168 1172
1169 sg_init_one(sg, &promisc, sizeof(promisc)); 1173 sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
1170 1174
1171 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1175 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1172 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 1176 VIRTIO_NET_CTRL_RX_PROMISC, sg))
1173 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 1177 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1174 promisc ? "en" : "dis"); 1178 vi->ctrl_promisc ? "en" : "dis");
1175 1179
1176 sg_init_one(sg, &allmulti, sizeof(allmulti)); 1180 sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
1177 1181
1178 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1182 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1179 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 1183 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1180 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 1184 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1181 allmulti ? "en" : "dis"); 1185 vi->ctrl_allmulti ? "en" : "dis");
1182 1186
1183 uc_count = netdev_uc_count(dev); 1187 uc_count = netdev_uc_count(dev);
1184 mc_count = netdev_mc_count(dev); 1188 mc_count = netdev_mc_count(dev);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6369a5734d4c..ba363cedef80 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1158 struct pcpu_sw_netstats *stats; 1158 struct pcpu_sw_netstats *stats;
1159 union vxlan_addr saddr; 1159 union vxlan_addr saddr;
1160 int err = 0; 1160 int err = 0;
1161 union vxlan_addr *remote_ip;
1162 1161
1163 /* For flow based devices, map all packets to VNI 0 */ 1162 /* For flow based devices, map all packets to VNI 0 */
1164 if (vs->flags & VXLAN_F_COLLECT_METADATA) 1163 if (vs->flags & VXLAN_F_COLLECT_METADATA)
@@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1169 if (!vxlan) 1168 if (!vxlan)
1170 goto drop; 1169 goto drop;
1171 1170
1172 remote_ip = &vxlan->default_dst.remote_ip;
1173 skb_reset_mac_header(skb); 1171 skb_reset_mac_header(skb);
1174 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); 1172 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1175 skb->protocol = eth_type_trans(skb, vxlan->dev); 1173 skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1179 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1177 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1180 goto drop; 1178 goto drop;
1181 1179
1182 /* Re-examine inner Ethernet packet */ 1180 /* Get data from the outer IP header */
1183 if (remote_ip->sa.sa_family == AF_INET) { 1181 if (vxlan_get_sk_family(vs) == AF_INET) {
1184 oip = ip_hdr(skb); 1182 oip = ip_hdr(skb);
1185 saddr.sin.sin_addr.s_addr = oip->saddr; 1183 saddr.sin.sin_addr.s_addr = oip->saddr;
1186 saddr.sa.sa_family = AF_INET; 1184 saddr.sa.sa_family = AF_INET;
@@ -1848,6 +1846,34 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
1848 !(vxflags & VXLAN_F_UDP_CSUM)); 1846 !(vxflags & VXLAN_F_UDP_CSUM));
1849} 1847}
1850 1848
1849#if IS_ENABLED(CONFIG_IPV6)
1850static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1851 struct sk_buff *skb, int oif,
1852 const struct in6_addr *daddr,
1853 struct in6_addr *saddr)
1854{
1855 struct dst_entry *ndst;
1856 struct flowi6 fl6;
1857 int err;
1858
1859 memset(&fl6, 0, sizeof(fl6));
1860 fl6.flowi6_oif = oif;
1861 fl6.daddr = *daddr;
1862 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
1863 fl6.flowi6_mark = skb->mark;
1864 fl6.flowi6_proto = IPPROTO_UDP;
1865
1866 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1867 vxlan->vn6_sock->sock->sk,
1868 &ndst, &fl6);
1869 if (err < 0)
1870 return ERR_PTR(err);
1871
1872 *saddr = fl6.saddr;
1873 return ndst;
1874}
1875#endif
1876
1851/* Bypass encapsulation if the destination is local */ 1877/* Bypass encapsulation if the destination is local */
1852static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1878static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1853 struct vxlan_dev *dst_vxlan) 1879 struct vxlan_dev *dst_vxlan)
@@ -2035,21 +2061,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2035#if IS_ENABLED(CONFIG_IPV6) 2061#if IS_ENABLED(CONFIG_IPV6)
2036 } else { 2062 } else {
2037 struct dst_entry *ndst; 2063 struct dst_entry *ndst;
2038 struct flowi6 fl6; 2064 struct in6_addr saddr;
2039 u32 rt6i_flags; 2065 u32 rt6i_flags;
2040 2066
2041 if (!vxlan->vn6_sock) 2067 if (!vxlan->vn6_sock)
2042 goto drop; 2068 goto drop;
2043 sk = vxlan->vn6_sock->sock->sk; 2069 sk = vxlan->vn6_sock->sock->sk;
2044 2070
2045 memset(&fl6, 0, sizeof(fl6)); 2071 ndst = vxlan6_get_route(vxlan, skb,
2046 fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; 2072 rdst ? rdst->remote_ifindex : 0,
2047 fl6.daddr = dst->sin6.sin6_addr; 2073 &dst->sin6.sin6_addr, &saddr);
2048 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 2074 if (IS_ERR(ndst)) {
2049 fl6.flowi6_mark = skb->mark;
2050 fl6.flowi6_proto = IPPROTO_UDP;
2051
2052 if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
2053 netdev_dbg(dev, "no route to %pI6\n", 2075 netdev_dbg(dev, "no route to %pI6\n",
2054 &dst->sin6.sin6_addr); 2076 &dst->sin6.sin6_addr);
2055 dev->stats.tx_carrier_errors++; 2077 dev->stats.tx_carrier_errors++;
@@ -2081,7 +2103,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2081 } 2103 }
2082 2104
2083 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2105 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2084 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, 2106 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
2085 0, ttl, src_port, dst_port, htonl(vni << 8), md, 2107 0, ttl, src_port, dst_port, htonl(vni << 8), md,
2086 !net_eq(vxlan->net, dev_net(vxlan->dev)), 2108 !net_eq(vxlan->net, dev_net(vxlan->dev)),
2087 flags); 2109 flags);
@@ -2395,9 +2417,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2395 vxlan->cfg.port_max, true); 2417 vxlan->cfg.port_max, true);
2396 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; 2418 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2397 2419
2398 if (ip_tunnel_info_af(info) == AF_INET) 2420 if (ip_tunnel_info_af(info) == AF_INET) {
2421 if (!vxlan->vn4_sock)
2422 return -EINVAL;
2399 return egress_ipv4_tun_info(dev, skb, info, sport, dport); 2423 return egress_ipv4_tun_info(dev, skb, info, sport, dport);
2400 return -EINVAL; 2424 } else {
2425#if IS_ENABLED(CONFIG_IPV6)
2426 struct dst_entry *ndst;
2427
2428 if (!vxlan->vn6_sock)
2429 return -EINVAL;
2430 ndst = vxlan6_get_route(vxlan, skb, 0,
2431 &info->key.u.ipv6.dst,
2432 &info->key.u.ipv6.src);
2433 if (IS_ERR(ndst))
2434 return PTR_ERR(ndst);
2435 dst_release(ndst);
2436
2437 info->key.tp_src = sport;
2438 info->key.tp_dst = dport;
2439#else /* !CONFIG_IPV6 */
2440 return -EPFNOSUPPORT;
2441#endif
2442 }
2443 return 0;
2401} 2444}
2402 2445
2403static const struct net_device_ops vxlan_netdev_ops = { 2446static const struct net_device_ops vxlan_netdev_ops = {
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e481f3710bd3..1049c34e7d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
258 struct netrx_pending_operations *npo) 258 struct netrx_pending_operations *npo)
259{ 259{
260 struct xenvif_rx_meta *meta; 260 struct xenvif_rx_meta *meta;
261 struct xen_netif_rx_request *req; 261 struct xen_netif_rx_request req;
262 262
263 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 263 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
264 264
265 meta = npo->meta + npo->meta_prod++; 265 meta = npo->meta + npo->meta_prod++;
266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
267 meta->gso_size = 0; 267 meta->gso_size = 0;
268 meta->size = 0; 268 meta->size = 0;
269 meta->id = req->id; 269 meta->id = req.id;
270 270
271 npo->copy_off = 0; 271 npo->copy_off = 0;
272 npo->copy_gref = req->gref; 272 npo->copy_gref = req.gref;
273 273
274 return meta; 274 return meta;
275} 275}
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
424 struct xenvif *vif = netdev_priv(skb->dev); 424 struct xenvif *vif = netdev_priv(skb->dev);
425 int nr_frags = skb_shinfo(skb)->nr_frags; 425 int nr_frags = skb_shinfo(skb)->nr_frags;
426 int i; 426 int i;
427 struct xen_netif_rx_request *req; 427 struct xen_netif_rx_request req;
428 struct xenvif_rx_meta *meta; 428 struct xenvif_rx_meta *meta;
429 unsigned char *data; 429 unsigned char *data;
430 int head = 1; 430 int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
443 443
444 /* Set up a GSO prefix descriptor, if necessary */ 444 /* Set up a GSO prefix descriptor, if necessary */
445 if ((1 << gso_type) & vif->gso_prefix_mask) { 445 if ((1 << gso_type) & vif->gso_prefix_mask) {
446 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 446 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
447 meta = npo->meta + npo->meta_prod++; 447 meta = npo->meta + npo->meta_prod++;
448 meta->gso_type = gso_type; 448 meta->gso_type = gso_type;
449 meta->gso_size = skb_shinfo(skb)->gso_size; 449 meta->gso_size = skb_shinfo(skb)->gso_size;
450 meta->size = 0; 450 meta->size = 0;
451 meta->id = req->id; 451 meta->id = req.id;
452 } 452 }
453 453
454 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 454 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
455 meta = npo->meta + npo->meta_prod++; 455 meta = npo->meta + npo->meta_prod++;
456 456
457 if ((1 << gso_type) & vif->gso_mask) { 457 if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
463 } 463 }
464 464
465 meta->size = 0; 465 meta->size = 0;
466 meta->id = req->id; 466 meta->id = req.id;
467 npo->copy_off = 0; 467 npo->copy_off = 0;
468 npo->copy_gref = req->gref; 468 npo->copy_gref = req.gref;
469 469
470 data = skb->data; 470 data = skb->data;
471 while (data < skb_tail_pointer(skb)) { 471 while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB. 679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
680 * Otherwise the interface can seize up due to insufficient credit. 680 * Otherwise the interface can seize up due to insufficient credit.
681 */ 681 */
682 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; 682 max_burst = max(131072UL, queue->credit_bytes);
683 max_burst = min(max_burst, 131072UL);
684 max_burst = max(max_burst, queue->credit_bytes);
685 683
686 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 684 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
687 max_credit = queue->remaining_credit + queue->credit_bytes; 685 max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
711 spin_unlock_irqrestore(&queue->response_lock, flags); 709 spin_unlock_irqrestore(&queue->response_lock, flags);
712 if (cons == end) 710 if (cons == end)
713 break; 711 break;
714 txp = RING_GET_REQUEST(&queue->tx, cons++); 712 RING_COPY_REQUEST(&queue->tx, cons++, txp);
715 } while (1); 713 } while (1);
716 queue->tx.req_cons = cons; 714 queue->tx.req_cons = cons;
717} 715}
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
778 if (drop_err) 776 if (drop_err)
779 txp = &dropped_tx; 777 txp = &dropped_tx;
780 778
781 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), 779 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
782 sizeof(*txp));
783 780
784 /* If the guest submitted a frame >= 64 KiB then 781 /* If the guest submitted a frame >= 64 KiB then
785 * first->size overflowed and following slots will 782 * first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
1112 return -EBADR; 1109 return -EBADR;
1113 } 1110 }
1114 1111
1115 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), 1112 RING_COPY_REQUEST(&queue->tx, cons, &extra);
1116 sizeof(extra));
1117 if (unlikely(!extra.type || 1113 if (unlikely(!extra.type ||
1118 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1114 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1119 queue->tx.req_cons = ++cons; 1115 queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1322 1318
1323 idx = queue->tx.req_cons; 1319 idx = queue->tx.req_cons;
1324 rmb(); /* Ensure that we see the request before we copy it. */ 1320 rmb(); /* Ensure that we see the request before we copy it. */
1325 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); 1321 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
1326 1322
1327 /* Credit-based scheduling. */ 1323 /* Credit-based scheduling. */
1328 if (txreq.size > queue->remaining_credit && 1324 if (txreq.size > queue->remaining_credit &&
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 06c336410235..15f2acb4d5cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -271,9 +271,9 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
271 return 0; 271 return 0;
272} 272}
273 273
274static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) 274static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
275{ 275{
276 struct nvme_ns *ns = q->queuedata; 276 struct nvme_ns *ns = nvmdev->q->queuedata;
277 struct nvme_dev *dev = ns->dev; 277 struct nvme_dev *dev = ns->dev;
278 struct nvme_nvm_id *nvme_nvm_id; 278 struct nvme_nvm_id *nvme_nvm_id;
279 struct nvme_nvm_command c = {}; 279 struct nvme_nvm_command c = {};
@@ -308,10 +308,10 @@ out:
308 return ret; 308 return ret;
309} 309}
310 310
311static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, 311static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
312 nvm_l2p_update_fn *update_l2p, void *priv) 312 nvm_l2p_update_fn *update_l2p, void *priv)
313{ 313{
314 struct nvme_ns *ns = q->queuedata; 314 struct nvme_ns *ns = nvmdev->q->queuedata;
315 struct nvme_dev *dev = ns->dev; 315 struct nvme_dev *dev = ns->dev;
316 struct nvme_nvm_command c = {}; 316 struct nvme_nvm_command c = {};
317 u32 len = queue_max_hw_sectors(dev->admin_q) << 9; 317 u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
@@ -415,10 +415,10 @@ out:
415 return ret; 415 return ret;
416} 416}
417 417
418static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, 418static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
419 int type) 419 int type)
420{ 420{
421 struct nvme_ns *ns = q->queuedata; 421 struct nvme_ns *ns = nvmdev->q->queuedata;
422 struct nvme_dev *dev = ns->dev; 422 struct nvme_dev *dev = ns->dev;
423 struct nvme_nvm_command c = {}; 423 struct nvme_nvm_command c = {};
424 int ret = 0; 424 int ret = 0;
@@ -455,7 +455,7 @@ static void nvme_nvm_end_io(struct request *rq, int error)
455 struct nvm_rq *rqd = rq->end_io_data; 455 struct nvm_rq *rqd = rq->end_io_data;
456 struct nvm_dev *dev = rqd->dev; 456 struct nvm_dev *dev = rqd->dev;
457 457
458 if (dev->mt->end_io(rqd, error)) 458 if (dev->mt && dev->mt->end_io(rqd, error))
459 pr_err("nvme: err status: %x result: %lx\n", 459 pr_err("nvme: err status: %x result: %lx\n",
460 rq->errors, (unsigned long)rq->special); 460 rq->errors, (unsigned long)rq->special);
461 461
@@ -463,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error)
463 blk_mq_free_request(rq); 463 blk_mq_free_request(rq);
464} 464}
465 465
466static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) 466static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
467{ 467{
468 struct request_queue *q = dev->q;
468 struct nvme_ns *ns = q->queuedata; 469 struct nvme_ns *ns = q->queuedata;
469 struct request *rq; 470 struct request *rq;
470 struct bio *bio = rqd->bio; 471 struct bio *bio = rqd->bio;
@@ -502,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
502 return 0; 503 return 0;
503} 504}
504 505
505static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd) 506static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
506{ 507{
508 struct request_queue *q = dev->q;
507 struct nvme_ns *ns = q->queuedata; 509 struct nvme_ns *ns = q->queuedata;
508 struct nvme_nvm_command c = {}; 510 struct nvme_nvm_command c = {};
509 511
@@ -515,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
515 return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0); 517 return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
516} 518}
517 519
518static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name) 520static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
519{ 521{
520 struct nvme_ns *ns = q->queuedata; 522 struct nvme_ns *ns = nvmdev->q->queuedata;
521 struct nvme_dev *dev = ns->dev; 523 struct nvme_dev *dev = ns->dev;
522 524
523 return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0); 525 return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
@@ -530,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool)
530 dma_pool_destroy(dma_pool); 532 dma_pool_destroy(dma_pool);
531} 533}
532 534
533static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool, 535static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
534 gfp_t mem_flags, dma_addr_t *dma_handler) 536 gfp_t mem_flags, dma_addr_t *dma_handler)
535{ 537{
536 return dma_pool_alloc(pool, mem_flags, dma_handler); 538 return dma_pool_alloc(pool, mem_flags, dma_handler);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9e294ff4e652..0c67b57be83c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2540,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2540{ 2540{
2541 bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); 2541 bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
2542 2542
2543 if (kill) 2543 if (kill) {
2544 blk_set_queue_dying(ns->queue); 2544 blk_set_queue_dying(ns->queue);
2545
2546 /*
2547 * The controller was shutdown first if we got here through
2548 * device removal. The shutdown may requeue outstanding
2549 * requests. These need to be aborted immediately so
2550 * del_gendisk doesn't block indefinitely for their completion.
2551 */
2552 blk_mq_abort_requeue_list(ns->queue);
2553 }
2545 if (ns->disk->flags & GENHD_FL_UP) 2554 if (ns->disk->flags & GENHD_FL_UP)
2546 del_gendisk(ns->disk); 2555 del_gendisk(ns->disk);
2547 if (kill || !blk_queue_dying(ns->queue)) { 2556 if (kill || !blk_queue_dying(ns->queue)) {
@@ -2977,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev)
2977{ 2986{
2978 struct nvme_ns *ns, *next; 2987 struct nvme_ns *ns, *next;
2979 2988
2989 if (nvme_io_incapable(dev)) {
2990 /*
2991 * If the device is not capable of IO (surprise hot-removal,
2992 * for example), we need to quiesce prior to deleting the
2993 * namespaces. This will end outstanding requests and prevent
2994 * attempts to sync dirty data.
2995 */
2996 nvme_dev_shutdown(dev);
2997 }
2980 list_for_each_entry_safe(ns, next, &dev->namespaces, list) 2998 list_for_each_entry_safe(ns, next, &dev->namespaces, list)
2981 nvme_ns_remove(ns); 2999 nvme_ns_remove(ns);
2982} 3000}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index cd53fe4a0c86..9582c5703b3c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -485,9 +485,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
485 int rone; 485 int rone;
486 u64 offset = OF_BAD_ADDR; 486 u64 offset = OF_BAD_ADDR;
487 487
488 /* Normally, an absence of a "ranges" property means we are 488 /*
489 * Normally, an absence of a "ranges" property means we are
489 * crossing a non-translatable boundary, and thus the addresses 490 * crossing a non-translatable boundary, and thus the addresses
490 * below the current not cannot be converted to CPU physical ones. 491 * below the current cannot be converted to CPU physical ones.
491 * Unfortunately, while this is very clear in the spec, it's not 492 * Unfortunately, while this is very clear in the spec, it's not
492 * what Apple understood, and they do have things like /uni-n or 493 * what Apple understood, and they do have things like /uni-n or
493 * /ht nodes with no "ranges" property and a lot of perfectly 494 * /ht nodes with no "ranges" property and a lot of perfectly
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d2430298a309..655f79db7899 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/initrd.h> 14#include <linux/initrd.h>
15#include <linux/memblock.h> 15#include <linux/memblock.h>
16#include <linux/mutex.h>
16#include <linux/of.h> 17#include <linux/of.h>
17#include <linux/of_fdt.h> 18#include <linux/of_fdt.h>
18#include <linux/of_reserved_mem.h> 19#include <linux/of_reserved_mem.h>
@@ -436,6 +437,8 @@ static void *kernel_tree_alloc(u64 size, u64 align)
436 return kzalloc(size, GFP_KERNEL); 437 return kzalloc(size, GFP_KERNEL);
437} 438}
438 439
440static DEFINE_MUTEX(of_fdt_unflatten_mutex);
441
439/** 442/**
440 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob 443 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
441 * 444 *
@@ -447,7 +450,9 @@ static void *kernel_tree_alloc(u64 size, u64 align)
447void of_fdt_unflatten_tree(const unsigned long *blob, 450void of_fdt_unflatten_tree(const unsigned long *blob,
448 struct device_node **mynodes) 451 struct device_node **mynodes)
449{ 452{
453 mutex_lock(&of_fdt_unflatten_mutex);
450 __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc); 454 __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc);
455 mutex_unlock(&of_fdt_unflatten_mutex);
451} 456}
452EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree); 457EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
453 458
@@ -1041,7 +1046,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1041int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, 1046int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
1042 phys_addr_t size, bool nomap) 1047 phys_addr_t size, bool nomap)
1043{ 1048{
1044 pr_err("Reserved memory not supported, ignoring range 0x%pa - 0x%pa%s\n", 1049 pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
1045 &base, &size, nomap ? " (nomap)" : ""); 1050 &base, &size, nomap ? " (nomap)" : "");
1046 return -ENOSYS; 1051 return -ENOSYS;
1047} 1052}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 902b89be7217..4fa916dffc91 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
53 * Returns a pointer to the interrupt parent node, or NULL if the interrupt 53 * Returns a pointer to the interrupt parent node, or NULL if the interrupt
54 * parent could not be determined. 54 * parent could not be determined.
55 */ 55 */
56static struct device_node *of_irq_find_parent(struct device_node *child) 56struct device_node *of_irq_find_parent(struct device_node *child)
57{ 57{
58 struct device_node *p; 58 struct device_node *p;
59 const __be32 *parp; 59 const __be32 *parp;
@@ -77,6 +77,7 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
77 77
78 return p; 78 return p;
79} 79}
80EXPORT_SYMBOL_GPL(of_irq_find_parent);
80 81
81/** 82/**
82 * of_irq_parse_raw - Low level interrupt tree parsing 83 * of_irq_parse_raw - Low level interrupt tree parsing
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index be77e75c587d..1a3556a9e9ea 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -206,7 +206,13 @@ static int __init __rmem_cmp(const void *a, const void *b)
206{ 206{
207 const struct reserved_mem *ra = a, *rb = b; 207 const struct reserved_mem *ra = a, *rb = b;
208 208
209 return ra->base - rb->base; 209 if (ra->base < rb->base)
210 return -1;
211
212 if (ra->base > rb->base)
213 return 1;
214
215 return 0;
210} 216}
211 217
212static void __init __rmem_check_for_overlap(void) 218static void __init __rmem_check_for_overlap(void)
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 761e77bfce5d..e56f1569f6c3 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
104 struct scatterlist *contig_sg; /* contig chunk head */ 104 struct scatterlist *contig_sg; /* contig chunk head */
105 unsigned long dma_offset, dma_len; /* start/len of DMA stream */ 105 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
106 unsigned int n_mappings = 0; 106 unsigned int n_mappings = 0;
107 unsigned int max_seg_size = dma_get_max_seg_size(dev); 107 unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
108 (unsigned)DMA_CHUNK_SIZE);
109 unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
110 if (max_seg_boundary) /* check if the addition above didn't overflow */
111 max_seg_size = min(max_seg_size, max_seg_boundary);
108 112
109 while (nents > 0) { 113 while (nents > 0) {
110 114
@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
138 142
139 /* 143 /*
140 ** First make sure current dma stream won't 144 ** First make sure current dma stream won't
141 ** exceed DMA_CHUNK_SIZE if we coalesce the 145 ** exceed max_seg_size if we coalesce the
142 ** next entry. 146 ** next entry.
143 */ 147 */
144 if(unlikely(ALIGN(dma_len + dma_offset + startsg->length, 148 if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
145 IOVP_SIZE) > DMA_CHUNK_SIZE)) 149 max_seg_size))
146 break;
147
148 if (startsg->length + dma_len > max_seg_size)
149 break; 150 break;
150 151
151 /* 152 /*
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index e5dda38bdde5..99da549d5d06 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -55,8 +55,10 @@
55#define TLP_CFG_DW2(bus, devfn, offset) \ 55#define TLP_CFG_DW2(bus, devfn, offset) \
56 (((bus) << 24) | ((devfn) << 16) | (offset)) 56 (((bus) << 24) | ((devfn) << 16) | (offset))
57#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) 57#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
58#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
58#define TLP_HDR_SIZE 3 59#define TLP_HDR_SIZE 3
59#define TLP_LOOP 500 60#define TLP_LOOP 500
61#define RP_DEVFN 0
60 62
61#define INTX_NUM 4 63#define INTX_NUM 4
62 64
@@ -166,34 +168,41 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie,
166 168
167static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) 169static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
168{ 170{
169 u8 loop; 171 int i;
170 bool sop = 0; 172 bool sop = 0;
171 u32 ctrl; 173 u32 ctrl;
172 u32 reg0, reg1; 174 u32 reg0, reg1;
175 u32 comp_status = 1;
173 176
174 /* 177 /*
175 * Minimum 2 loops to read TLP headers and 1 loop to read data 178 * Minimum 2 loops to read TLP headers and 1 loop to read data
176 * payload. 179 * payload.
177 */ 180 */
178 for (loop = 0; loop < TLP_LOOP; loop++) { 181 for (i = 0; i < TLP_LOOP; i++) {
179 ctrl = cra_readl(pcie, RP_RXCPL_STATUS); 182 ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
180 if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { 183 if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
181 reg0 = cra_readl(pcie, RP_RXCPL_REG0); 184 reg0 = cra_readl(pcie, RP_RXCPL_REG0);
182 reg1 = cra_readl(pcie, RP_RXCPL_REG1); 185 reg1 = cra_readl(pcie, RP_RXCPL_REG1);
183 186
184 if (ctrl & RP_RXCPL_SOP) 187 if (ctrl & RP_RXCPL_SOP) {
185 sop = true; 188 sop = true;
189 comp_status = TLP_COMP_STATUS(reg1);
190 }
186 191
187 if (ctrl & RP_RXCPL_EOP) { 192 if (ctrl & RP_RXCPL_EOP) {
193 if (comp_status)
194 return PCIBIOS_DEVICE_NOT_FOUND;
195
188 if (value) 196 if (value)
189 *value = reg0; 197 *value = reg0;
198
190 return PCIBIOS_SUCCESSFUL; 199 return PCIBIOS_SUCCESSFUL;
191 } 200 }
192 } 201 }
193 udelay(5); 202 udelay(5);
194 } 203 }
195 204
196 return -ENOENT; 205 return PCIBIOS_DEVICE_NOT_FOUND;
197} 206}
198 207
199static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, 208static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
@@ -233,7 +242,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
233 else 242 else
234 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1); 243 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
235 244
236 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn), 245 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
237 TLP_READ_TAG, byte_en); 246 TLP_READ_TAG, byte_en);
238 headers[2] = TLP_CFG_DW2(bus, devfn, where); 247 headers[2] = TLP_CFG_DW2(bus, devfn, where);
239 248
@@ -253,7 +262,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
253 else 262 else
254 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1); 263 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
255 264
256 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn), 265 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
257 TLP_WRITE_TAG, byte_en); 266 TLP_WRITE_TAG, byte_en);
258 headers[2] = TLP_CFG_DW2(bus, devfn, where); 267 headers[2] = TLP_CFG_DW2(bus, devfn, where);
259 268
@@ -458,7 +467,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
458 struct device_node *node = dev->of_node; 467 struct device_node *node = dev->of_node;
459 468
460 /* Setup INTx */ 469 /* Setup INTx */
461 pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM, 470 pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1,
462 &intx_domain_ops, pcie); 471 &intx_domain_ops, pcie);
463 if (!pcie->irq_domain) { 472 if (!pcie->irq_domain) {
464 dev_err(dev, "Failed to get a INTx IRQ domain\n"); 473 dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 53e463244bb7..7eaa4c87fec7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -54,7 +54,7 @@ static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
54 struct irq_domain *domain; 54 struct irq_domain *domain;
55 55
56 domain = pci_msi_get_domain(dev); 56 domain = pci_msi_get_domain(dev);
57 if (domain) 57 if (domain && irq_domain_is_hierarchy(domain))
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); 58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
59 59
60 return arch_setup_msi_irqs(dev, nvec, type); 60 return arch_setup_msi_irqs(dev, nvec, type);
@@ -65,7 +65,7 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
65 struct irq_domain *domain; 65 struct irq_domain *domain;
66 66
67 domain = pci_msi_get_domain(dev); 67 domain = pci_msi_get_domain(dev);
68 if (domain) 68 if (domain && irq_domain_is_hierarchy(domain))
69 pci_msi_domain_free_irqs(domain, dev); 69 pci_msi_domain_free_irqs(domain, dev);
70 else 70 else
71 arch_teardown_msi_irqs(dev); 71 arch_teardown_msi_irqs(dev);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 7eb5859dd035..03cb3ea2d2c0 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -233,6 +233,7 @@ config PHY_SUN9I_USB
233 tristate "Allwinner sun9i SoC USB PHY driver" 233 tristate "Allwinner sun9i SoC USB PHY driver"
234 depends on ARCH_SUNXI && HAS_IOMEM && OF 234 depends on ARCH_SUNXI && HAS_IOMEM && OF
235 depends on RESET_CONTROLLER 235 depends on RESET_CONTROLLER
236 depends on USB_COMMON
236 select GENERIC_PHY 237 select GENERIC_PHY
237 help 238 help
238 Enable this to support the transceiver that is part of Allwinner 239 Enable this to support the transceiver that is part of Allwinner
diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c
index 7ad72b7d2b98..082c03f6438f 100644
--- a/drivers/phy/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/phy-bcm-cygnus-pcie.c
@@ -128,6 +128,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
128 struct phy_provider *provider; 128 struct phy_provider *provider;
129 struct resource *res; 129 struct resource *res;
130 unsigned cnt = 0; 130 unsigned cnt = 0;
131 int ret;
131 132
132 if (of_get_child_count(node) == 0) { 133 if (of_get_child_count(node) == 0) {
133 dev_err(dev, "PHY no child node\n"); 134 dev_err(dev, "PHY no child node\n");
@@ -154,24 +155,28 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
154 if (of_property_read_u32(child, "reg", &id)) { 155 if (of_property_read_u32(child, "reg", &id)) {
155 dev_err(dev, "missing reg property for %s\n", 156 dev_err(dev, "missing reg property for %s\n",
156 child->name); 157 child->name);
157 return -EINVAL; 158 ret = -EINVAL;
159 goto put_child;
158 } 160 }
159 161
160 if (id >= MAX_NUM_PHYS) { 162 if (id >= MAX_NUM_PHYS) {
161 dev_err(dev, "invalid PHY id: %u\n", id); 163 dev_err(dev, "invalid PHY id: %u\n", id);
162 return -EINVAL; 164 ret = -EINVAL;
165 goto put_child;
163 } 166 }
164 167
165 if (core->phys[id].phy) { 168 if (core->phys[id].phy) {
166 dev_err(dev, "duplicated PHY id: %u\n", id); 169 dev_err(dev, "duplicated PHY id: %u\n", id);
167 return -EINVAL; 170 ret = -EINVAL;
171 goto put_child;
168 } 172 }
169 173
170 p = &core->phys[id]; 174 p = &core->phys[id];
171 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); 175 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
172 if (IS_ERR(p->phy)) { 176 if (IS_ERR(p->phy)) {
173 dev_err(dev, "failed to create PHY\n"); 177 dev_err(dev, "failed to create PHY\n");
174 return PTR_ERR(p->phy); 178 ret = PTR_ERR(p->phy);
179 goto put_child;
175 } 180 }
176 181
177 p->core = core; 182 p->core = core;
@@ -191,6 +196,9 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
191 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); 196 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt);
192 197
193 return 0; 198 return 0;
199put_child:
200 of_node_put(child);
201 return ret;
194} 202}
195 203
196static const struct of_device_id cygnus_pcie_phy_match_table[] = { 204static const struct of_device_id cygnus_pcie_phy_match_table[] = {
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 77a2e054fdea..f84a33a1bdd9 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -195,7 +195,7 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
195 struct phy_provider *phy_provider; 195 struct phy_provider *phy_provider;
196 struct phy_berlin_priv *priv; 196 struct phy_berlin_priv *priv;
197 struct resource *res; 197 struct resource *res;
198 int i = 0; 198 int ret, i = 0;
199 u32 phy_id; 199 u32 phy_id;
200 200
201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -237,22 +237,27 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
237 if (of_property_read_u32(child, "reg", &phy_id)) { 237 if (of_property_read_u32(child, "reg", &phy_id)) {
238 dev_err(dev, "missing reg property in node %s\n", 238 dev_err(dev, "missing reg property in node %s\n",
239 child->name); 239 child->name);
240 return -EINVAL; 240 ret = -EINVAL;
241 goto put_child;
241 } 242 }
242 243
243 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { 244 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) {
244 dev_err(dev, "invalid reg in node %s\n", child->name); 245 dev_err(dev, "invalid reg in node %s\n", child->name);
245 return -EINVAL; 246 ret = -EINVAL;
247 goto put_child;
246 } 248 }
247 249
248 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); 250 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL);
249 if (!phy_desc) 251 if (!phy_desc) {
250 return -ENOMEM; 252 ret = -ENOMEM;
253 goto put_child;
254 }
251 255
252 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); 256 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops);
253 if (IS_ERR(phy)) { 257 if (IS_ERR(phy)) {
254 dev_err(dev, "failed to create PHY %d\n", phy_id); 258 dev_err(dev, "failed to create PHY %d\n", phy_id);
255 return PTR_ERR(phy); 259 ret = PTR_ERR(phy);
260 goto put_child;
256 } 261 }
257 262
258 phy_desc->phy = phy; 263 phy_desc->phy = phy;
@@ -269,6 +274,9 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
269 phy_provider = 274 phy_provider =
270 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); 275 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate);
271 return PTR_ERR_OR_ZERO(phy_provider); 276 return PTR_ERR_OR_ZERO(phy_provider);
277put_child:
278 of_node_put(child);
279 return ret;
272} 280}
273 281
274static const struct of_device_id phy_berlin_sata_of_match[] = { 282static const struct of_device_id phy_berlin_sata_of_match[] = {
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c
index 8a2cb16a1937..cd9dba820566 100644
--- a/drivers/phy/phy-brcmstb-sata.c
+++ b/drivers/phy/phy-brcmstb-sata.c
@@ -140,7 +140,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
140 struct brcm_sata_phy *priv; 140 struct brcm_sata_phy *priv;
141 struct resource *res; 141 struct resource *res;
142 struct phy_provider *provider; 142 struct phy_provider *provider;
143 int count = 0; 143 int ret, count = 0;
144 144
145 if (of_get_child_count(dn) == 0) 145 if (of_get_child_count(dn) == 0)
146 return -ENODEV; 146 return -ENODEV;
@@ -163,16 +163,19 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
163 if (of_property_read_u32(child, "reg", &id)) { 163 if (of_property_read_u32(child, "reg", &id)) {
164 dev_err(dev, "missing reg property in node %s\n", 164 dev_err(dev, "missing reg property in node %s\n",
165 child->name); 165 child->name);
166 return -EINVAL; 166 ret = -EINVAL;
167 goto put_child;
167 } 168 }
168 169
169 if (id >= MAX_PORTS) { 170 if (id >= MAX_PORTS) {
170 dev_err(dev, "invalid reg: %u\n", id); 171 dev_err(dev, "invalid reg: %u\n", id);
171 return -EINVAL; 172 ret = -EINVAL;
173 goto put_child;
172 } 174 }
173 if (priv->phys[id].phy) { 175 if (priv->phys[id].phy) {
174 dev_err(dev, "already registered port %u\n", id); 176 dev_err(dev, "already registered port %u\n", id);
175 return -EINVAL; 177 ret = -EINVAL;
178 goto put_child;
176 } 179 }
177 180
178 port = &priv->phys[id]; 181 port = &priv->phys[id];
@@ -182,7 +185,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
182 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); 185 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
183 if (IS_ERR(port->phy)) { 186 if (IS_ERR(port->phy)) {
184 dev_err(dev, "failed to create PHY\n"); 187 dev_err(dev, "failed to create PHY\n");
185 return PTR_ERR(port->phy); 188 ret = PTR_ERR(port->phy);
189 goto put_child;
186 } 190 }
187 191
188 phy_set_drvdata(port->phy, port); 192 phy_set_drvdata(port->phy, port);
@@ -198,6 +202,9 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
198 dev_info(dev, "registered %d port(s)\n", count); 202 dev_info(dev, "registered %d port(s)\n", count);
199 203
200 return 0; 204 return 0;
205put_child:
206 of_node_put(child);
207 return ret;
201} 208}
202 209
203static struct platform_driver brcm_sata_phy_driver = { 210static struct platform_driver brcm_sata_phy_driver = {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index fc48fac003a6..8c7f27db6ad3 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -636,8 +636,9 @@ EXPORT_SYMBOL_GPL(devm_of_phy_get);
636 * @np: node containing the phy 636 * @np: node containing the phy
637 * @index: index of the phy 637 * @index: index of the phy
638 * 638 *
639 * Gets the phy using _of_phy_get(), and associates a device with it using 639 * Gets the phy using _of_phy_get(), then gets a refcount to it,
640 * devres. On driver detach, release function is invoked on the devres data, 640 * and associates a device with it using devres. On driver detach,
641 * release function is invoked on the devres data,
641 * then, devres data is freed. 642 * then, devres data is freed.
642 * 643 *
643 */ 644 */
@@ -651,13 +652,21 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
651 return ERR_PTR(-ENOMEM); 652 return ERR_PTR(-ENOMEM);
652 653
653 phy = _of_phy_get(np, index); 654 phy = _of_phy_get(np, index);
654 if (!IS_ERR(phy)) { 655 if (IS_ERR(phy)) {
655 *ptr = phy;
656 devres_add(dev, ptr);
657 } else {
658 devres_free(ptr); 656 devres_free(ptr);
657 return phy;
659 } 658 }
660 659
660 if (!try_module_get(phy->ops->owner)) {
661 devres_free(ptr);
662 return ERR_PTR(-EPROBE_DEFER);
663 }
664
665 get_device(&phy->dev);
666
667 *ptr = phy;
668 devres_add(dev, ptr);
669
661 return phy; 670 return phy;
662} 671}
663EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index); 672EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index c47b56b4a2b8..3acd2a1808df 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1226,15 +1226,18 @@ static int miphy28lp_probe(struct platform_device *pdev)
1226 1226
1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
1228 GFP_KERNEL); 1228 GFP_KERNEL);
1229 if (!miphy_phy) 1229 if (!miphy_phy) {
1230 return -ENOMEM; 1230 ret = -ENOMEM;
1231 goto put_child;
1232 }
1231 1233
1232 miphy_dev->phys[port] = miphy_phy; 1234 miphy_dev->phys[port] = miphy_phy;
1233 1235
1234 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); 1236 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops);
1235 if (IS_ERR(phy)) { 1237 if (IS_ERR(phy)) {
1236 dev_err(&pdev->dev, "failed to create PHY\n"); 1238 dev_err(&pdev->dev, "failed to create PHY\n");
1237 return PTR_ERR(phy); 1239 ret = PTR_ERR(phy);
1240 goto put_child;
1238 } 1241 }
1239 1242
1240 miphy_dev->phys[port]->phy = phy; 1243 miphy_dev->phys[port]->phy = phy;
@@ -1242,11 +1245,11 @@ static int miphy28lp_probe(struct platform_device *pdev)
1242 1245
1243 ret = miphy28lp_of_probe(child, miphy_phy); 1246 ret = miphy28lp_of_probe(child, miphy_phy);
1244 if (ret) 1247 if (ret)
1245 return ret; 1248 goto put_child;
1246 1249
1247 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); 1250 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]);
1248 if (ret) 1251 if (ret)
1249 return ret; 1252 goto put_child;
1250 1253
1251 phy_set_drvdata(phy, miphy_dev->phys[port]); 1254 phy_set_drvdata(phy, miphy_dev->phys[port]);
1252 port++; 1255 port++;
@@ -1255,6 +1258,9 @@ static int miphy28lp_probe(struct platform_device *pdev)
1255 1258
1256 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); 1259 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate);
1257 return PTR_ERR_OR_ZERO(provider); 1260 return PTR_ERR_OR_ZERO(provider);
1261put_child:
1262 of_node_put(child);
1263 return ret;
1258} 1264}
1259 1265
1260static const struct of_device_id miphy28lp_of_match[] = { 1266static const struct of_device_id miphy28lp_of_match[] = {
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 00a686a073ed..e661f3b36eaa 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -566,22 +566,25 @@ static int miphy365x_probe(struct platform_device *pdev)
566 566
567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
568 GFP_KERNEL); 568 GFP_KERNEL);
569 if (!miphy_phy) 569 if (!miphy_phy) {
570 return -ENOMEM; 570 ret = -ENOMEM;
571 goto put_child;
572 }
571 573
572 miphy_dev->phys[port] = miphy_phy; 574 miphy_dev->phys[port] = miphy_phy;
573 575
574 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); 576 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
575 if (IS_ERR(phy)) { 577 if (IS_ERR(phy)) {
576 dev_err(&pdev->dev, "failed to create PHY\n"); 578 dev_err(&pdev->dev, "failed to create PHY\n");
577 return PTR_ERR(phy); 579 ret = PTR_ERR(phy);
580 goto put_child;
578 } 581 }
579 582
580 miphy_dev->phys[port]->phy = phy; 583 miphy_dev->phys[port]->phy = phy;
581 584
582 ret = miphy365x_of_probe(child, miphy_phy); 585 ret = miphy365x_of_probe(child, miphy_phy);
583 if (ret) 586 if (ret)
584 return ret; 587 goto put_child;
585 588
586 phy_set_drvdata(phy, miphy_dev->phys[port]); 589 phy_set_drvdata(phy, miphy_dev->phys[port]);
587 590
@@ -591,12 +594,15 @@ static int miphy365x_probe(struct platform_device *pdev)
591 &miphy_phy->ctrlreg); 594 &miphy_phy->ctrlreg);
592 if (ret) { 595 if (ret) {
593 dev_err(&pdev->dev, "No sysconfig offset found\n"); 596 dev_err(&pdev->dev, "No sysconfig offset found\n");
594 return ret; 597 goto put_child;
595 } 598 }
596 } 599 }
597 600
598 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); 601 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
599 return PTR_ERR_OR_ZERO(provider); 602 return PTR_ERR_OR_ZERO(provider);
603put_child:
604 of_node_put(child);
605 return ret;
600} 606}
601 607
602static const struct of_device_id miphy365x_of_match[] = { 608static const struct of_device_id miphy365x_of_match[] = {
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c
index f30b28bd41fe..e427c3b788ff 100644
--- a/drivers/phy/phy-mt65xx-usb3.c
+++ b/drivers/phy/phy-mt65xx-usb3.c
@@ -415,7 +415,7 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
415 struct resource *sif_res; 415 struct resource *sif_res;
416 struct mt65xx_u3phy *u3phy; 416 struct mt65xx_u3phy *u3phy;
417 struct resource res; 417 struct resource res;
418 int port; 418 int port, retval;
419 419
420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); 420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL);
421 if (!u3phy) 421 if (!u3phy)
@@ -447,31 +447,34 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
447 for_each_child_of_node(np, child_np) { 447 for_each_child_of_node(np, child_np) {
448 struct mt65xx_phy_instance *instance; 448 struct mt65xx_phy_instance *instance;
449 struct phy *phy; 449 struct phy *phy;
450 int retval;
451 450
452 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); 451 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
453 if (!instance) 452 if (!instance) {
454 return -ENOMEM; 453 retval = -ENOMEM;
454 goto put_child;
455 }
455 456
456 u3phy->phys[port] = instance; 457 u3phy->phys[port] = instance;
457 458
458 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); 459 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops);
459 if (IS_ERR(phy)) { 460 if (IS_ERR(phy)) {
460 dev_err(dev, "failed to create phy\n"); 461 dev_err(dev, "failed to create phy\n");
461 return PTR_ERR(phy); 462 retval = PTR_ERR(phy);
463 goto put_child;
462 } 464 }
463 465
464 retval = of_address_to_resource(child_np, 0, &res); 466 retval = of_address_to_resource(child_np, 0, &res);
465 if (retval) { 467 if (retval) {
466 dev_err(dev, "failed to get address resource(id-%d)\n", 468 dev_err(dev, "failed to get address resource(id-%d)\n",
467 port); 469 port);
468 return retval; 470 goto put_child;
469 } 471 }
470 472
471 instance->port_base = devm_ioremap_resource(&phy->dev, &res); 473 instance->port_base = devm_ioremap_resource(&phy->dev, &res);
472 if (IS_ERR(instance->port_base)) { 474 if (IS_ERR(instance->port_base)) {
473 dev_err(dev, "failed to remap phy regs\n"); 475 dev_err(dev, "failed to remap phy regs\n");
474 return PTR_ERR(instance->port_base); 476 retval = PTR_ERR(instance->port_base);
477 goto put_child;
475 } 478 }
476 479
477 instance->phy = phy; 480 instance->phy = phy;
@@ -483,6 +486,9 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
483 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); 486 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate);
484 487
485 return PTR_ERR_OR_ZERO(provider); 488 return PTR_ERR_OR_ZERO(provider);
489put_child:
490 of_node_put(child_np);
491 return retval;
486} 492}
487 493
488static const struct of_device_id mt65xx_u3phy_id_table[] = { 494static const struct of_device_id mt65xx_u3phy_id_table[] = {
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 91d6f342c565..62c43c435194 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -108,13 +108,16 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
108 108
109 for_each_available_child_of_node(dev->of_node, child) { 109 for_each_available_child_of_node(dev->of_node, child) {
110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); 110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
111 if (!rk_phy) 111 if (!rk_phy) {
112 return -ENOMEM; 112 err = -ENOMEM;
113 goto put_child;
114 }
113 115
114 if (of_property_read_u32(child, "reg", &reg_offset)) { 116 if (of_property_read_u32(child, "reg", &reg_offset)) {
115 dev_err(dev, "missing reg property in node %s\n", 117 dev_err(dev, "missing reg property in node %s\n",
116 child->name); 118 child->name);
117 return -EINVAL; 119 err = -EINVAL;
120 goto put_child;
118 } 121 }
119 122
120 rk_phy->reg_offset = reg_offset; 123 rk_phy->reg_offset = reg_offset;
@@ -127,18 +130,22 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
127 rk_phy->phy = devm_phy_create(dev, child, &ops); 130 rk_phy->phy = devm_phy_create(dev, child, &ops);
128 if (IS_ERR(rk_phy->phy)) { 131 if (IS_ERR(rk_phy->phy)) {
129 dev_err(dev, "failed to create PHY\n"); 132 dev_err(dev, "failed to create PHY\n");
130 return PTR_ERR(rk_phy->phy); 133 err = PTR_ERR(rk_phy->phy);
134 goto put_child;
131 } 135 }
132 phy_set_drvdata(rk_phy->phy, rk_phy); 136 phy_set_drvdata(rk_phy->phy, rk_phy);
133 137
134 /* only power up usb phy when it use, so disable it when init*/ 138 /* only power up usb phy when it use, so disable it when init*/
135 err = rockchip_usb_phy_power(rk_phy, 1); 139 err = rockchip_usb_phy_power(rk_phy, 1);
136 if (err) 140 if (err)
137 return err; 141 goto put_child;
138 } 142 }
139 143
140 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 144 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
141 return PTR_ERR_OR_ZERO(phy_provider); 145 return PTR_ERR_OR_ZERO(phy_provider);
146put_child:
147 of_node_put(child);
148 return err;
142} 149}
143 150
144static const struct of_device_id rockchip_usb_phy_dt_ids[] = { 151static const struct of_device_id rockchip_usb_phy_dt_ids[] = {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index a1ea565fcd46..2e6ca69635aa 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -342,12 +342,6 @@ static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset); 342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset);
343} 343}
344 344
345static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
346 unsigned offset, int value)
347{
348 return pinctrl_gpio_direction_output(chip->base + offset);
349}
350
351static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 345static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
352{ 346{
353 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); 347 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
@@ -355,6 +349,13 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
355 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); 349 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
356} 350}
357 351
352static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
353 unsigned offset, int value)
354{
355 bcm2835_gpio_set(chip, offset, value);
356 return pinctrl_gpio_direction_output(chip->base + offset);
357}
358
358static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 359static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
359{ 360{
360 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); 361 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 37a037543d29..587d1ff6210e 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = {
299static struct imx_pinctrl_soc_info vf610_pinctrl_info = { 299static struct imx_pinctrl_soc_info vf610_pinctrl_info = {
300 .pins = vf610_pinctrl_pads, 300 .pins = vf610_pinctrl_pads,
301 .npins = ARRAY_SIZE(vf610_pinctrl_pads), 301 .npins = ARRAY_SIZE(vf610_pinctrl_pads),
302 .flags = SHARE_MUX_CONF_REG, 302 .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID,
303}; 303};
304 304
305static const struct of_device_id vf610_pinctrl_of_match[] = { 305static const struct of_device_id vf610_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index e42d5d4183f5..5979d38c46b2 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -28,6 +28,7 @@
28 .padcfglock_offset = BXT_PADCFGLOCK, \ 28 .padcfglock_offset = BXT_PADCFGLOCK, \
29 .hostown_offset = BXT_HOSTSW_OWN, \ 29 .hostown_offset = BXT_HOSTSW_OWN, \
30 .ie_offset = BXT_GPI_IE, \ 30 .ie_offset = BXT_GPI_IE, \
31 .gpp_size = 32, \
31 .pin_base = (s), \ 32 .pin_base = (s), \
32 .npins = ((e) - (s) + 1), \ 33 .npins = ((e) - (s) + 1), \
33 } 34 }
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 392e28d3f48d..26f6b6ffea5b 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -25,9 +25,6 @@
25 25
26#include "pinctrl-intel.h" 26#include "pinctrl-intel.h"
27 27
28/* Maximum number of pads in each group */
29#define NPADS_IN_GPP 24
30
31/* Offset from regs */ 28/* Offset from regs */
32#define PADBAR 0x00c 29#define PADBAR 0x00c
33#define GPI_IS 0x100 30#define GPI_IS 0x100
@@ -37,6 +34,7 @@
37#define PADOWN_BITS 4 34#define PADOWN_BITS 4
38#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) 35#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
39#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) 36#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p))
37#define PADOWN_GPP(p) ((p) / 8)
40 38
41/* Offset from pad_regs */ 39/* Offset from pad_regs */
42#define PADCFG0 0x000 40#define PADCFG0 0x000
@@ -142,7 +140,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
142static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) 140static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
143{ 141{
144 const struct intel_community *community; 142 const struct intel_community *community;
145 unsigned padno, gpp, gpp_offset, offset; 143 unsigned padno, gpp, offset, group;
146 void __iomem *padown; 144 void __iomem *padown;
147 145
148 community = intel_get_community(pctrl, pin); 146 community = intel_get_community(pctrl, pin);
@@ -152,9 +150,9 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
152 return true; 150 return true;
153 151
154 padno = pin_to_padno(community, pin); 152 padno = pin_to_padno(community, pin);
155 gpp = padno / NPADS_IN_GPP; 153 group = padno / community->gpp_size;
156 gpp_offset = padno % NPADS_IN_GPP; 154 gpp = PADOWN_GPP(padno % community->gpp_size);
157 offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; 155 offset = community->padown_offset + 0x10 * group + gpp * 4;
158 padown = community->regs + offset; 156 padown = community->regs + offset;
159 157
160 return !(readl(padown) & PADOWN_MASK(padno)); 158 return !(readl(padown) & PADOWN_MASK(padno));
@@ -173,11 +171,11 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
173 return false; 171 return false;
174 172
175 padno = pin_to_padno(community, pin); 173 padno = pin_to_padno(community, pin);
176 gpp = padno / NPADS_IN_GPP; 174 gpp = padno / community->gpp_size;
177 offset = community->hostown_offset + gpp * 4; 175 offset = community->hostown_offset + gpp * 4;
178 hostown = community->regs + offset; 176 hostown = community->regs + offset;
179 177
180 return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); 178 return !(readl(hostown) & BIT(padno % community->gpp_size));
181} 179}
182 180
183static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) 181static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
@@ -193,7 +191,7 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
193 return false; 191 return false;
194 192
195 padno = pin_to_padno(community, pin); 193 padno = pin_to_padno(community, pin);
196 gpp = padno / NPADS_IN_GPP; 194 gpp = padno / community->gpp_size;
197 195
198 /* 196 /*
199 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad, 197 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
@@ -202,12 +200,12 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
202 */ 200 */
203 offset = community->padcfglock_offset + gpp * 8; 201 offset = community->padcfglock_offset + gpp * 8;
204 value = readl(community->regs + offset); 202 value = readl(community->regs + offset);
205 if (value & BIT(pin % NPADS_IN_GPP)) 203 if (value & BIT(pin % community->gpp_size))
206 return true; 204 return true;
207 205
208 offset = community->padcfglock_offset + 4 + gpp * 8; 206 offset = community->padcfglock_offset + 4 + gpp * 8;
209 value = readl(community->regs + offset); 207 value = readl(community->regs + offset);
210 if (value & BIT(pin % NPADS_IN_GPP)) 208 if (value & BIT(pin % community->gpp_size))
211 return true; 209 return true;
212 210
213 return false; 211 return false;
@@ -663,8 +661,8 @@ static void intel_gpio_irq_ack(struct irq_data *d)
663 community = intel_get_community(pctrl, pin); 661 community = intel_get_community(pctrl, pin);
664 if (community) { 662 if (community) {
665 unsigned padno = pin_to_padno(community, pin); 663 unsigned padno = pin_to_padno(community, pin);
666 unsigned gpp_offset = padno % NPADS_IN_GPP; 664 unsigned gpp_offset = padno % community->gpp_size;
667 unsigned gpp = padno / NPADS_IN_GPP; 665 unsigned gpp = padno / community->gpp_size;
668 666
669 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); 667 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
670 } 668 }
@@ -685,8 +683,8 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
685 community = intel_get_community(pctrl, pin); 683 community = intel_get_community(pctrl, pin);
686 if (community) { 684 if (community) {
687 unsigned padno = pin_to_padno(community, pin); 685 unsigned padno = pin_to_padno(community, pin);
688 unsigned gpp_offset = padno % NPADS_IN_GPP; 686 unsigned gpp_offset = padno % community->gpp_size;
689 unsigned gpp = padno / NPADS_IN_GPP; 687 unsigned gpp = padno / community->gpp_size;
690 void __iomem *reg; 688 void __iomem *reg;
691 u32 value; 689 u32 value;
692 690
@@ -780,8 +778,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
780 return -EINVAL; 778 return -EINVAL;
781 779
782 padno = pin_to_padno(community, pin); 780 padno = pin_to_padno(community, pin);
783 gpp = padno / NPADS_IN_GPP; 781 gpp = padno / community->gpp_size;
784 gpp_offset = padno % NPADS_IN_GPP; 782 gpp_offset = padno % community->gpp_size;
785 783
786 /* Clear the existing wake status */ 784 /* Clear the existing wake status */
787 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); 785 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);
@@ -819,14 +817,14 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
819 /* Only interrupts that are enabled */ 817 /* Only interrupts that are enabled */
820 pending &= enabled; 818 pending &= enabled;
821 819
822 for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) { 820 for_each_set_bit(gpp_offset, &pending, community->gpp_size) {
823 unsigned padno, irq; 821 unsigned padno, irq;
824 822
825 /* 823 /*
826 * The last group in community can have less pins 824 * The last group in community can have less pins
827 * than NPADS_IN_GPP. 825 * than NPADS_IN_GPP.
828 */ 826 */
829 padno = gpp_offset + gpp * NPADS_IN_GPP; 827 padno = gpp_offset + gpp * community->gpp_size;
830 if (padno >= community->npins) 828 if (padno >= community->npins)
831 break; 829 break;
832 830
@@ -1002,7 +1000,8 @@ int intel_pinctrl_probe(struct platform_device *pdev,
1002 1000
1003 community->regs = regs; 1001 community->regs = regs;
1004 community->pad_regs = regs + padbar; 1002 community->pad_regs = regs + padbar;
1005 community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP); 1003 community->ngpps = DIV_ROUND_UP(community->npins,
1004 community->gpp_size);
1006 } 1005 }
1007 1006
1008 irq = platform_get_irq(pdev, 0); 1007 irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 4ec8b572a288..b60215793017 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -55,6 +55,8 @@ struct intel_function {
55 * ACPI). 55 * ACPI).
56 * @ie_offset: Register offset of GPI_IE from @regs. 56 * @ie_offset: Register offset of GPI_IE from @regs.
57 * @pin_base: Starting pin of pins in this community 57 * @pin_base: Starting pin of pins in this community
58 * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
59 * HOSTSW_OWN, GPI_IS, GPI_IE, etc.
58 * @npins: Number of pins in this community 60 * @npins: Number of pins in this community
59 * @regs: Community specific common registers (reserved for core driver) 61 * @regs: Community specific common registers (reserved for core driver)
60 * @pad_regs: Community specific pad registers (reserved for core driver) 62 * @pad_regs: Community specific pad registers (reserved for core driver)
@@ -68,6 +70,7 @@ struct intel_community {
68 unsigned hostown_offset; 70 unsigned hostown_offset;
69 unsigned ie_offset; 71 unsigned ie_offset;
70 unsigned pin_base; 72 unsigned pin_base;
73 unsigned gpp_size;
71 size_t npins; 74 size_t npins;
72 void __iomem *regs; 75 void __iomem *regs;
73 void __iomem *pad_regs; 76 void __iomem *pad_regs;
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 1de9ae5010db..c725a5313b4e 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -30,6 +30,7 @@
30 .padcfglock_offset = SPT_PADCFGLOCK, \ 30 .padcfglock_offset = SPT_PADCFGLOCK, \
31 .hostown_offset = SPT_HOSTSW_OWN, \ 31 .hostown_offset = SPT_HOSTSW_OWN, \
32 .ie_offset = SPT_GPI_IE, \ 32 .ie_offset = SPT_GPI_IE, \
33 .gpp_size = 24, \
33 .pin_base = (s), \ 34 .pin_base = (s), \
34 .npins = ((e) - (s) + 1), \ 35 .npins = ((e) - (s) + 1), \
35 } 36 }
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 976efeb3f2ba..2b921dea10f4 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com> 4 * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com>
5 * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de> 5 * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de>
6 * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,24 @@
26#include <acpi/video.h> 27#include <acpi/video.h>
27#include <asm/io.h> 28#include <asm/io.h>
28 29
30/**
31 * DOC: Overview
32 *
33 * :1: http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
34 * :2: http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
35 *
36 * gmux is a microcontroller built into the MacBook Pro to support dual GPUs:
37 * A {1}[Lattice XP2] on pre-retinas, a {2}[Renesas R4F2113] on retinas.
38 *
39 * (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has
40 * dual GPUs but no built-in display.)
41 *
42 * gmux is connected to the LPC bus of the southbridge. Its I/O ports are
43 * accessed differently depending on the microcontroller: Driver functions
44 * to access a pre-retina gmux are infixed `_pio_`, those for a retina gmux
45 * are infixed `_index_`.
46 */
47
29struct apple_gmux_data { 48struct apple_gmux_data {
30 unsigned long iostart; 49 unsigned long iostart;
31 unsigned long iolen; 50 unsigned long iolen;
@@ -247,6 +266,20 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
247 return false; 266 return false;
248} 267}
249 268
269/**
270 * DOC: Backlight control
271 *
272 * :3: http://www.ti.com/lit/ds/symlink/lp8543.pdf
273 * :4: http://www.ti.com/lit/ds/symlink/lp8545.pdf
274 *
275 * On single GPU MacBooks, the PWM signal for the backlight is generated by
276 * the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended
277 * to conserve energy. Hence the PWM signal needs to be generated by a separate
278 * backlight driver which is controlled by gmux. The earliest generation
279 * MBP5 2008/09 uses a {3}[TI LP8543] backlight driver. All newer models
280 * use a {4}[TI LP8545].
281 */
282
250static int gmux_get_brightness(struct backlight_device *bd) 283static int gmux_get_brightness(struct backlight_device *bd)
251{ 284{
252 struct apple_gmux_data *gmux_data = bl_get_data(bd); 285 struct apple_gmux_data *gmux_data = bl_get_data(bd);
@@ -273,6 +306,68 @@ static const struct backlight_ops gmux_bl_ops = {
273 .update_status = gmux_update_status, 306 .update_status = gmux_update_status,
274}; 307};
275 308
309/**
310 * DOC: Graphics mux
311 *
312 * :5: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
313 * :6: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
314 * :7: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
315 * :8: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
316 * :9: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
317 * :10: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
318 * :11: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
319 *
320 * On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes
321 * either of them to the panel. One of the tricks gmux has up its sleeve is
322 * to lengthen the blanking interval of its output during a switch to
323 * synchronize it with the GPU switched to. This allows for a flicker-free
324 * switch that is imperceptible by the user ({5}[US 8,687,007 B2]).
325 *
326 * On retinas, muxing is no longer done by gmux itself, but by a separate
327 * chip which is controlled by gmux. The chip is triple sourced, it is
328 * either an {6}[NXP CBTL06142], {7}[TI HD3SS212] or {8}[Pericom PI3VDP12412].
329 * The panel is driven with eDP instead of LVDS since the pixel clock
330 * required for retina resolution exceeds LVDS' limits.
331 *
332 * Pre-retinas are able to switch the panel's DDC pins separately.
333 * This is handled by a {9}[TI SN74LV4066A] which is controlled by gmux.
334 * The inactive GPU can thus probe the panel's EDID without switching over
335 * the entire panel. Retinas lack this functionality as the chips used for
336 * eDP muxing are incapable of switching the AUX channel separately (see
337 * the linked data sheets, Pericom would be capable but this is unused).
338 * However the retina panel has the NO_AUX_HANDSHAKE_LINK_TRAINING bit set
339 * in its DPCD, allowing the inactive GPU to skip the AUX handshake and
340 * set up the output with link parameters pre-calibrated by the active GPU.
341 *
342 * The external DP port is only fully switchable on the first two unibody
343 * MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an
344 * {6}[NXP CBTL06141] which is controlled by gmux. It's the predecessor of the
345 * eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s.
346 *
347 * The following MacBook Pro generations replaced the external DP port with a
348 * combined DP/Thunderbolt port and lost the ability to switch it between GPUs,
349 * connecting it either to the discrete GPU or the Thunderbolt controller.
350 * Oddly enough, while the full port is no longer switchable, AUX and HPD
351 * are still switchable by way of an {10}[NXP CBTL03062] (on pre-retinas
352 * MBP8 2011 and MBP9 2012) or two {11}[TI TS3DS10224] (on retinas) under the
353 * control of gmux. Since the integrated GPU is missing the main link,
354 * external displays appear to it as phantoms which fail to link-train.
355 *
356 * gmux receives the HPD signal of all display connectors and sends an
357 * interrupt on hotplug. On generations which cannot switch external ports,
358 * the discrete GPU can then be woken to drive the newly connected display.
359 * The ability to switch AUX on these generations could be used to improve
360 * reliability of hotplug detection by having the integrated GPU poll the
361 * ports while the discrete GPU is asleep, but currently we do not make use
362 * of this feature.
363 *
364 * gmux' initial switch state on bootup is user configurable via the EFI
365 * variable `gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9` (5th byte,
366 * 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to
367 * switch the panel and the external DP connector and allocates a framebuffer
368 * for the selected GPU.
369 */
370
276static int gmux_switchto(enum vga_switcheroo_client_id id) 371static int gmux_switchto(enum vga_switcheroo_client_id id)
277{ 372{
278 if (id == VGA_SWITCHEROO_IGD) { 373 if (id == VGA_SWITCHEROO_IGD) {
@@ -288,6 +383,14 @@ static int gmux_switchto(enum vga_switcheroo_client_id id)
288 return 0; 383 return 0;
289} 384}
290 385
386/**
387 * DOC: Power control
388 *
389 * gmux is able to cut power to the discrete GPU. It automatically takes care
390 * of the correct sequence to tear down and bring up the power rails for
391 * core voltage, VRAM and PCIe.
392 */
393
291static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data, 394static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
292 enum vga_switcheroo_state state) 395 enum vga_switcheroo_state state)
293{ 396{
@@ -352,6 +455,16 @@ static const struct vga_switcheroo_handler gmux_handler = {
352 .get_client_id = gmux_get_client_id, 455 .get_client_id = gmux_get_client_id,
353}; 456};
354 457
458/**
459 * DOC: Interrupt
460 *
461 * gmux is also connected to a GPIO pin of the southbridge and thereby is able
462 * to trigger an ACPI GPE. On the MBP5 2008/09 it's GPIO pin 22 of the Nvidia
463 * MCP79, on all following generations it's GPIO pin 6 of the Intel PCH.
464 * The GPE merely signals that an interrupt occurred, the actual type of event
465 * is identified by reading a gmux register.
466 */
467
355static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data) 468static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data)
356{ 469{
357 gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, 470 gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cc97f0869791..48747c28a43d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1341,10 +1341,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
1341 1341
1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { 1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
1343 /* check if the domain is locked by BIOS */ 1343 /* check if the domain is locked by BIOS */
1344 if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { 1344 ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
1345 if (ret)
1346 return ret;
1347 if (locked) {
1345 pr_info("RAPL package %d domain %s locked by BIOS\n", 1348 pr_info("RAPL package %d domain %s locked by BIOS\n",
1346 rp->id, rd->name); 1349 rp->id, rd->name);
1347 rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1350 rd->state |= DOMAIN_STATE_BIOS_LOCKED;
1348 } 1351 }
1349 } 1352 }
1350 1353
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 284b587da65c..d6c853bbfa9f 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -483,24 +483,23 @@ static int da9063_rtc_probe(struct platform_device *pdev)
483 483
484 platform_set_drvdata(pdev, rtc); 484 platform_set_drvdata(pdev, rtc);
485 485
486 rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
487 &da9063_rtc_ops, THIS_MODULE);
488 if (IS_ERR(rtc->rtc_dev))
489 return PTR_ERR(rtc->rtc_dev);
490
491 da9063_data_to_tm(data, &rtc->alarm_time, rtc);
492 rtc->rtc_sync = false;
493
486 irq_alarm = platform_get_irq_byname(pdev, "ALARM"); 494 irq_alarm = platform_get_irq_byname(pdev, "ALARM");
487 ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, 495 ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
488 da9063_alarm_event, 496 da9063_alarm_event,
489 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 497 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
490 "ALARM", rtc); 498 "ALARM", rtc);
491 if (ret) { 499 if (ret)
492 dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", 500 dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
493 irq_alarm, ret); 501 irq_alarm, ret);
494 return ret;
495 }
496
497 rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
498 &da9063_rtc_ops, THIS_MODULE);
499 if (IS_ERR(rtc->rtc_dev))
500 return PTR_ERR(rtc->rtc_dev);
501 502
502 da9063_data_to_tm(data, &rtc->alarm_time, rtc);
503 rtc->rtc_sync = false;
504 return ret; 503 return ret;
505} 504}
506 505
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index 91ca0bc1b484..35c9aada07c8 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -56,6 +56,42 @@ struct rk808_rtc {
56 int irq; 56 int irq;
57}; 57};
58 58
59/*
60 * The Rockchip calendar used by the RK808 counts November with 31 days. We use
61 * these translation functions to convert its dates to/from the Gregorian
62 * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016
63 * as the day when both calendars were in sync, and treat all other dates
64 * relative to that.
65 * NOTE: Other system software (e.g. firmware) that reads the same hardware must
66 * implement this exact same conversion algorithm, with the same anchor date.
67 */
68static time64_t nov2dec_transitions(struct rtc_time *tm)
69{
70 return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
71}
72
73static void rockchip_to_gregorian(struct rtc_time *tm)
74{
75 /* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */
76 time64_t time = rtc_tm_to_time64(tm);
77 rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm);
78}
79
80static void gregorian_to_rockchip(struct rtc_time *tm)
81{
82 time64_t extra_days = nov2dec_transitions(tm);
83 time64_t time = rtc_tm_to_time64(tm);
84 rtc_time64_to_tm(time - extra_days * 86400, tm);
85
86 /* Compensate if we went back over Nov 31st (will work up to 2381) */
87 if (nov2dec_transitions(tm) < extra_days) {
88 if (tm->tm_mon + 1 == 11)
89 tm->tm_mday++; /* This may result in 31! */
90 else
91 rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm);
92 }
93}
94
59/* Read current time and date in RTC */ 95/* Read current time and date in RTC */
60static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) 96static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
61{ 97{
@@ -101,9 +137,10 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
101 tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1; 137 tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1;
102 tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100; 138 tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100;
103 tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK); 139 tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK);
140 rockchip_to_gregorian(tm);
104 dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 141 dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
105 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 142 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
106 tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); 143 tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
107 144
108 return ret; 145 return ret;
109} 146}
@@ -116,6 +153,10 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
116 u8 rtc_data[NUM_TIME_REGS]; 153 u8 rtc_data[NUM_TIME_REGS];
117 int ret; 154 int ret;
118 155
156 dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
157 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
158 tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
159 gregorian_to_rockchip(tm);
119 rtc_data[0] = bin2bcd(tm->tm_sec); 160 rtc_data[0] = bin2bcd(tm->tm_sec);
120 rtc_data[1] = bin2bcd(tm->tm_min); 161 rtc_data[1] = bin2bcd(tm->tm_min);
121 rtc_data[2] = bin2bcd(tm->tm_hour); 162 rtc_data[2] = bin2bcd(tm->tm_hour);
@@ -123,9 +164,6 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
123 rtc_data[4] = bin2bcd(tm->tm_mon + 1); 164 rtc_data[4] = bin2bcd(tm->tm_mon + 1);
124 rtc_data[5] = bin2bcd(tm->tm_year - 100); 165 rtc_data[5] = bin2bcd(tm->tm_year - 100);
125 rtc_data[6] = bin2bcd(tm->tm_wday); 166 rtc_data[6] = bin2bcd(tm->tm_wday);
126 dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
127 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
128 tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec);
129 167
130 /* Stop RTC while updating the RTC registers */ 168 /* Stop RTC while updating the RTC registers */
131 ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG, 169 ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG,
@@ -170,6 +208,7 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
170 alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK); 208 alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK);
171 alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1; 209 alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1;
172 alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; 210 alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100;
211 rockchip_to_gregorian(&alrm->time);
173 212
174 ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg); 213 ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg);
175 if (ret) { 214 if (ret) {
@@ -227,6 +266,7 @@ static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
227 alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour, 266 alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour,
228 alrm->time.tm_min, alrm->time.tm_sec); 267 alrm->time.tm_min, alrm->time.tm_sec);
229 268
269 gregorian_to_rockchip(&alrm->time);
230 alrm_data[0] = bin2bcd(alrm->time.tm_sec); 270 alrm_data[0] = bin2bcd(alrm->time.tm_sec);
231 alrm_data[1] = bin2bcd(alrm->time.tm_min); 271 alrm_data[1] = bin2bcd(alrm->time.tm_min);
232 alrm_data[2] = bin2bcd(alrm->time.tm_hour); 272 alrm_data[2] = bin2bcd(alrm->time.tm_hour);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 61f768518a34..24ec282e15d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -599,8 +599,10 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
599 status = ap_sm_recv(ap_dev); 599 status = ap_sm_recv(ap_dev);
600 switch (status.response_code) { 600 switch (status.response_code) {
601 case AP_RESPONSE_NORMAL: 601 case AP_RESPONSE_NORMAL:
602 if (ap_dev->queue_count > 0) 602 if (ap_dev->queue_count > 0) {
603 ap_dev->state = AP_STATE_WORKING;
603 return AP_WAIT_AGAIN; 604 return AP_WAIT_AGAIN;
605 }
604 ap_dev->state = AP_STATE_IDLE; 606 ap_dev->state = AP_STATE_IDLE;
605 return AP_WAIT_NONE; 607 return AP_WAIT_NONE;
606 case AP_RESPONSE_NO_PENDING_REPLY: 608 case AP_RESPONSE_NO_PENDING_REPLY:
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index b2a1a81e6fc8..1b831598df7c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -984,6 +984,36 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
984 return vq; 984 return vq;
985} 985}
986 986
987static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
988 __u32 activity)
989{
990 if (vcdev->curr_io & activity) {
991 switch (activity) {
992 case VIRTIO_CCW_DOING_READ_FEAT:
993 case VIRTIO_CCW_DOING_WRITE_FEAT:
994 case VIRTIO_CCW_DOING_READ_CONFIG:
995 case VIRTIO_CCW_DOING_WRITE_CONFIG:
996 case VIRTIO_CCW_DOING_WRITE_STATUS:
997 case VIRTIO_CCW_DOING_SET_VQ:
998 case VIRTIO_CCW_DOING_SET_IND:
999 case VIRTIO_CCW_DOING_SET_CONF_IND:
1000 case VIRTIO_CCW_DOING_RESET:
1001 case VIRTIO_CCW_DOING_READ_VQ_CONF:
1002 case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
1003 case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
1004 vcdev->curr_io &= ~activity;
1005 wake_up(&vcdev->wait_q);
1006 break;
1007 default:
1008 /* don't know what to do... */
1009 dev_warn(&vcdev->cdev->dev,
1010 "Suspicious activity '%08x'\n", activity);
1011 WARN_ON(1);
1012 break;
1013 }
1014 }
1015}
1016
987static void virtio_ccw_int_handler(struct ccw_device *cdev, 1017static void virtio_ccw_int_handler(struct ccw_device *cdev,
988 unsigned long intparm, 1018 unsigned long intparm,
989 struct irb *irb) 1019 struct irb *irb)
@@ -995,6 +1025,12 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
995 1025
996 if (!vcdev) 1026 if (!vcdev)
997 return; 1027 return;
1028 if (IS_ERR(irb)) {
1029 vcdev->err = PTR_ERR(irb);
1030 virtio_ccw_check_activity(vcdev, activity);
1031 /* Don't poke around indicators, something's wrong. */
1032 return;
1033 }
998 /* Check if it's a notification from the host. */ 1034 /* Check if it's a notification from the host. */
999 if ((intparm == 0) && 1035 if ((intparm == 0) &&
1000 (scsw_stctl(&irb->scsw) == 1036 (scsw_stctl(&irb->scsw) ==
@@ -1010,31 +1046,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
1010 /* Map everything else to -EIO. */ 1046 /* Map everything else to -EIO. */
1011 vcdev->err = -EIO; 1047 vcdev->err = -EIO;
1012 } 1048 }
1013 if (vcdev->curr_io & activity) { 1049 virtio_ccw_check_activity(vcdev, activity);
1014 switch (activity) {
1015 case VIRTIO_CCW_DOING_READ_FEAT:
1016 case VIRTIO_CCW_DOING_WRITE_FEAT:
1017 case VIRTIO_CCW_DOING_READ_CONFIG:
1018 case VIRTIO_CCW_DOING_WRITE_CONFIG:
1019 case VIRTIO_CCW_DOING_WRITE_STATUS:
1020 case VIRTIO_CCW_DOING_SET_VQ:
1021 case VIRTIO_CCW_DOING_SET_IND:
1022 case VIRTIO_CCW_DOING_SET_CONF_IND:
1023 case VIRTIO_CCW_DOING_RESET:
1024 case VIRTIO_CCW_DOING_READ_VQ_CONF:
1025 case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
1026 case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
1027 vcdev->curr_io &= ~activity;
1028 wake_up(&vcdev->wait_q);
1029 break;
1030 default:
1031 /* don't know what to do... */
1032 dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
1033 activity);
1034 WARN_ON(1);
1035 break;
1036 }
1037 }
1038 for_each_set_bit(i, &vcdev->indicators, 1050 for_each_set_bit(i, &vcdev->indicators,
1039 sizeof(vcdev->indicators) * BITS_PER_BYTE) { 1051 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
1040 /* The bit clear must happen before the vring kick. */ 1052 /* The bit clear must happen before the vring kick. */
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index e4b799837948..459abe1dcc87 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -219,13 +219,13 @@ static int sdev_runtime_suspend(struct device *dev)
219 struct scsi_device *sdev = to_scsi_device(dev); 219 struct scsi_device *sdev = to_scsi_device(dev);
220 int err = 0; 220 int err = 0;
221 221
222 if (pm && pm->runtime_suspend) { 222 err = blk_pre_runtime_suspend(sdev->request_queue);
223 err = blk_pre_runtime_suspend(sdev->request_queue); 223 if (err)
224 if (err) 224 return err;
225 return err; 225 if (pm && pm->runtime_suspend)
226 err = pm->runtime_suspend(dev); 226 err = pm->runtime_suspend(dev);
227 blk_post_runtime_suspend(sdev->request_queue, err); 227 blk_post_runtime_suspend(sdev->request_queue, err);
228 } 228
229 return err; 229 return err;
230} 230}
231 231
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
249 int err = 0; 249 int err = 0;
250 250
251 if (pm && pm->runtime_resume) { 251 blk_pre_runtime_resume(sdev->request_queue);
252 blk_pre_runtime_resume(sdev->request_queue); 252 if (pm && pm->runtime_resume)
253 err = pm->runtime_resume(dev); 253 err = pm->runtime_resume(dev);
254 blk_post_runtime_resume(sdev->request_queue, err); 254 blk_post_runtime_resume(sdev->request_queue, err);
255 } 255
256 return err; 256 return err;
257} 257}
258 258
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index dcb0d76d7312..044d06410d4c 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -84,6 +84,7 @@ static void init_device_slot_control(unsigned char *dest_desc,
84static int ses_recv_diag(struct scsi_device *sdev, int page_code, 84static int ses_recv_diag(struct scsi_device *sdev, int page_code,
85 void *buf, int bufflen) 85 void *buf, int bufflen)
86{ 86{
87 int ret;
87 unsigned char cmd[] = { 88 unsigned char cmd[] = {
88 RECEIVE_DIAGNOSTIC, 89 RECEIVE_DIAGNOSTIC,
89 1, /* Set PCV bit */ 90 1, /* Set PCV bit */
@@ -92,9 +93,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
92 bufflen & 0xff, 93 bufflen & 0xff,
93 0 94 0
94 }; 95 };
96 unsigned char recv_page_code;
95 97
96 return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 98 ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
97 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 99 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
100 if (unlikely(!ret))
101 return ret;
102
103 recv_page_code = ((unsigned char *)buf)[0];
104
105 if (likely(recv_page_code == page_code))
106 return ret;
107
108 /* successful diagnostic but wrong page code. This happens to some
109 * USB devices, just print a message and pretend there was an error */
110
111 sdev_printk(KERN_ERR, sdev,
112 "Wrong diagnostic page; asked for %d got %u\n",
113 page_code, recv_page_code);
114
115 return -EINVAL;
98} 116}
99 117
100static int ses_send_diag(struct scsi_device *sdev, int page_code, 118static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -541,7 +559,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
541 if (desc_ptr) 559 if (desc_ptr)
542 desc_ptr += len; 560 desc_ptr += len;
543 561
544 if (addl_desc_ptr) 562 if (addl_desc_ptr &&
563 /* only find additional descriptions for specific devices */
564 (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
565 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
566 type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
567 /* these elements are optional */
568 type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
569 type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
570 type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
545 addl_desc_ptr += addl_desc_ptr[1] + 2; 571 addl_desc_ptr += addl_desc_ptr[1] + 2;
546 572
547 } 573 }
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 59a11437db70..39412c9097c6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -167,7 +167,7 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
167{ 167{
168 unsigned int val; 168 unsigned int val;
169 169
170 regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val); 170 regmap_read(dspi->regmap, SPI_CTAR(0), &val);
171 171
172 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; 172 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
173} 173}
@@ -257,7 +257,7 @@ static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
257 257
258 return SPI_PUSHR_TXDATA(d16) | 258 return SPI_PUSHR_TXDATA(d16) |
259 SPI_PUSHR_PCS(dspi->cs) | 259 SPI_PUSHR_PCS(dspi->cs) |
260 SPI_PUSHR_CTAS(dspi->cs) | 260 SPI_PUSHR_CTAS(0) |
261 SPI_PUSHR_CONT; 261 SPI_PUSHR_CONT;
262} 262}
263 263
@@ -290,7 +290,7 @@ static int dspi_eoq_write(struct fsl_dspi *dspi)
290 */ 290 */
291 if (tx_word && (dspi->len == 1)) { 291 if (tx_word && (dspi->len == 1)) {
292 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 292 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
293 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 293 regmap_update_bits(dspi->regmap, SPI_CTAR(0),
294 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 294 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
295 tx_word = 0; 295 tx_word = 0;
296 } 296 }
@@ -339,7 +339,7 @@ static int dspi_tcfq_write(struct fsl_dspi *dspi)
339 339
340 if (tx_word && (dspi->len == 1)) { 340 if (tx_word && (dspi->len == 1)) {
341 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 341 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
342 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 342 regmap_update_bits(dspi->regmap, SPI_CTAR(0),
343 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 343 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
344 tx_word = 0; 344 tx_word = 0;
345 } 345 }
@@ -407,7 +407,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
407 regmap_update_bits(dspi->regmap, SPI_MCR, 407 regmap_update_bits(dspi->regmap, SPI_MCR,
408 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, 408 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
409 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); 409 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
410 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 410 regmap_write(dspi->regmap, SPI_CTAR(0),
411 dspi->cur_chip->ctar_val); 411 dspi->cur_chip->ctar_val);
412 412
413 trans_mode = dspi->devtype_data->trans_mode; 413 trans_mode = dspi->devtype_data->trans_mode;
@@ -566,7 +566,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
566 if (!dspi->len) { 566 if (!dspi->len) {
567 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) { 567 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
568 regmap_update_bits(dspi->regmap, 568 regmap_update_bits(dspi->regmap,
569 SPI_CTAR(dspi->cs), 569 SPI_CTAR(0),
570 SPI_FRAME_BITS_MASK, 570 SPI_FRAME_BITS_MASK,
571 SPI_FRAME_BITS(16)); 571 SPI_FRAME_BITS(16));
572 dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM; 572 dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2b0a8ec3affb..dee1cb87d24f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1705,7 +1705,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1705 master->bus_num = -1; 1705 master->bus_num = -1;
1706 master->num_chipselect = 1; 1706 master->num_chipselect = 1;
1707 master->dev.class = &spi_master_class; 1707 master->dev.class = &spi_master_class;
1708 master->dev.parent = get_device(dev); 1708 master->dev.parent = dev;
1709 spi_master_set_devdata(master, &master[1]); 1709 spi_master_set_devdata(master, &master[1]);
1710 1710
1711 return master; 1711 return master;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 91a0fcd72423..d0e7dfc647cf 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,11 +651,11 @@ static int spidev_release(struct inode *inode, struct file *filp)
651 kfree(spidev->rx_buffer); 651 kfree(spidev->rx_buffer);
652 spidev->rx_buffer = NULL; 652 spidev->rx_buffer = NULL;
653 653
654 spin_lock_irq(&spidev->spi_lock);
654 if (spidev->spi) 655 if (spidev->spi)
655 spidev->speed_hz = spidev->spi->max_speed_hz; 656 spidev->speed_hz = spidev->spi->max_speed_hz;
656 657
657 /* ... after we unbound from the underlying device? */ 658 /* ... after we unbound from the underlying device? */
658 spin_lock_irq(&spidev->spi_lock);
659 dofree = (spidev->spi == NULL); 659 dofree = (spidev->spi == NULL);
660 spin_unlock_irq(&spidev->spi_lock); 660 spin_unlock_irq(&spidev->spi_lock);
661 661
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 195c41d7bd53..0813163f962f 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
81err: 81err:
82 sg = table->sgl; 82 sg = table->sgl;
83 for (i -= 1; i >= 0; i--) { 83 for (i -= 1; i >= 0; i--) {
84 gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, 84 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
85 sg->length); 85 sg->length);
86 sg = sg_next(sg); 86 sg = sg_next(sg);
87 } 87 }
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
109 DMA_BIDIRECTIONAL); 109 DMA_BIDIRECTIONAL);
110 110
111 for_each_sg(table->sgl, sg, table->nents, i) { 111 for_each_sg(table->sgl, sg, table->nents, i) {
112 gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, 112 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
113 sg->length); 113 sg->length);
114 } 114 }
115 chunk_heap->allocated -= allocated_size; 115 chunk_heap->allocated -= allocated_size;
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index bfbf1c56bd22..6eb600ff7056 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -159,7 +159,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
159 struct iio_dummy_state *st = iio_priv(indio_dev); 159 struct iio_dummy_state *st = iio_priv(indio_dev);
160 160
161 st->event_timestamp = iio_get_time_ns(); 161 st->event_timestamp = iio_get_time_ns();
162 return IRQ_HANDLED; 162 return IRQ_WAKE_THREAD;
163} 163}
164 164
165/** 165/**
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index f61ef669644c..a4a9a763ff02 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1270,6 +1270,7 @@ static int
1270echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) 1270echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1271{ 1271{
1272 struct lov_stripe_md *ulsm = _ulsm; 1272 struct lov_stripe_md *ulsm = _ulsm;
1273 struct lov_oinfo **p;
1273 int nob, i; 1274 int nob, i;
1274 1275
1275 nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]); 1276 nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
@@ -1279,9 +1280,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1279 if (copy_to_user(ulsm, lsm, sizeof(*ulsm))) 1280 if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
1280 return -EFAULT; 1281 return -EFAULT;
1281 1282
1282 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1283 for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
1283 if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i], 1284 struct lov_oinfo __user *up;
1284 sizeof(lsm->lsm_oinfo[0]))) 1285 if (get_user(up, ulsm->lsm_oinfo + i) ||
1286 copy_to_user(up, *p, sizeof(struct lov_oinfo)))
1285 return -EFAULT; 1287 return -EFAULT;
1286 } 1288 }
1287 return 0; 1289 return 0;
@@ -1289,9 +1291,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1289 1291
1290static int 1292static int
1291echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm, 1293echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
1292 void *ulsm, int ulsm_nob) 1294 struct lov_stripe_md __user *ulsm, int ulsm_nob)
1293{ 1295{
1294 struct echo_client_obd *ec = ed->ed_ec; 1296 struct echo_client_obd *ec = ed->ed_ec;
1297 struct lov_oinfo **p;
1295 int i; 1298 int i;
1296 1299
1297 if (ulsm_nob < sizeof(*lsm)) 1300 if (ulsm_nob < sizeof(*lsm))
@@ -1306,11 +1309,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
1306 ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL)) 1309 ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
1307 return -EINVAL; 1310 return -EINVAL;
1308 1311
1309 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1312 for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
1310 if (copy_from_user(lsm->lsm_oinfo[i], 1313 struct lov_oinfo __user *up;
1311 ((struct lov_stripe_md *)ulsm)-> \ 1314 if (get_user(up, ulsm->lsm_oinfo + i) ||
1312 lsm_oinfo[i], 1315 copy_from_user(*p, up, sizeof(struct lov_oinfo)))
1313 sizeof(lsm->lsm_oinfo[0])))
1314 return -EFAULT; 1316 return -EFAULT;
1315 } 1317 }
1316 return 0; 1318 return 0;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index ed776149261e..e49c2bce551d 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2054,13 +2054,13 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2054 size_t eol; 2054 size_t eol;
2055 size_t tail; 2055 size_t tail;
2056 int ret, found = 0; 2056 int ret, found = 0;
2057 bool eof_push = 0;
2058 2057
2059 /* N.B. avoid overrun if nr == 0 */ 2058 /* N.B. avoid overrun if nr == 0 */
2060 n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); 2059 if (!*nr)
2061 if (!n)
2062 return 0; 2060 return 0;
2063 2061
2062 n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
2063
2064 tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); 2064 tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
2065 size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); 2065 size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
2066 2066
@@ -2081,12 +2081,11 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2081 n = eol - tail; 2081 n = eol - tail;
2082 if (n > N_TTY_BUF_SIZE) 2082 if (n > N_TTY_BUF_SIZE)
2083 n += N_TTY_BUF_SIZE; 2083 n += N_TTY_BUF_SIZE;
2084 n += found; 2084 c = n + found;
2085 c = n;
2086 2085
2087 if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { 2086 if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
2088 n--; 2087 c = min(*nr, c);
2089 eof_push = !n && ldata->read_tail != ldata->line_start; 2088 n = c;
2090 } 2089 }
2091 2090
2092 n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", 2091 n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n",
@@ -2116,7 +2115,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2116 ldata->push = 0; 2115 ldata->push = 0;
2117 tty_audit_push(tty); 2116 tty_audit_push(tty);
2118 } 2117 }
2119 return eof_push ? -EAGAIN : 0; 2118 return 0;
2120} 2119}
2121 2120
2122extern ssize_t redirected_tty_write(struct file *, const char __user *, 2121extern ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -2273,10 +2272,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2273 2272
2274 if (ldata->icanon && !L_EXTPROC(tty)) { 2273 if (ldata->icanon && !L_EXTPROC(tty)) {
2275 retval = canon_copy_from_read_buf(tty, &b, &nr); 2274 retval = canon_copy_from_read_buf(tty, &b, &nr);
2276 if (retval == -EAGAIN) { 2275 if (retval)
2277 retval = 0;
2278 continue;
2279 } else if (retval)
2280 break; 2276 break;
2281 } else { 2277 } else {
2282 int uncopied; 2278 int uncopied;
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index d11621e2cf1d..245edbb68d4b 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -115,12 +115,16 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
115 */ 115 */
116static int uniphier_serial_dl_read(struct uart_8250_port *up) 116static int uniphier_serial_dl_read(struct uart_8250_port *up)
117{ 117{
118 return readl(up->port.membase + UNIPHIER_UART_DLR); 118 int offset = UNIPHIER_UART_DLR << up->port.regshift;
119
120 return readl(up->port.membase + offset);
119} 121}
120 122
121static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) 123static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
122{ 124{
123 writel(value, up->port.membase + UNIPHIER_UART_DLR); 125 int offset = UNIPHIER_UART_DLR << up->port.regshift;
126
127 writel(value, up->port.membase + offset);
124} 128}
125 129
126static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port, 130static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index f09636083426..b5b2f2be6be7 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -115,6 +115,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
115 if (buf && !parse_options(&early_console_dev, buf)) 115 if (buf && !parse_options(&early_console_dev, buf))
116 buf = NULL; 116 buf = NULL;
117 117
118 spin_lock_init(&port->lock);
118 port->uartclk = BASE_BAUD * 16; 119 port->uartclk = BASE_BAUD * 16;
119 if (port->mapbase) 120 if (port->mapbase)
120 port->membase = earlycon_map(port->mapbase, 64); 121 port->membase = earlycon_map(port->mapbase, 64);
@@ -202,6 +203,7 @@ int __init of_setup_earlycon(unsigned long addr,
202 int err; 203 int err;
203 struct uart_port *port = &early_console_dev.port; 204 struct uart_port *port = &early_console_dev.port;
204 205
206 spin_lock_init(&port->lock);
205 port->iotype = UPIO_MEM; 207 port->iotype = UPIO_MEM;
206 port->mapbase = addr; 208 port->mapbase = addr;
207 port->uartclk = BASE_BAUD * 16; 209 port->uartclk = BASE_BAUD * 16;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 960e50a97558..51c7507b0444 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1437,7 +1437,7 @@ static void sci_request_dma(struct uart_port *port)
1437 sg_init_table(sg, 1); 1437 sg_init_table(sg, 1);
1438 s->rx_buf[i] = buf; 1438 s->rx_buf[i] = buf;
1439 sg_dma_address(sg) = dma; 1439 sg_dma_address(sg) = dma;
1440 sg->length = s->buf_len_rx; 1440 sg_dma_len(sg) = s->buf_len_rx;
1441 1441
1442 buf += s->buf_len_rx; 1442 buf += s->buf_len_rx;
1443 dma += s->buf_len_rx; 1443 dma += s->buf_len_rx;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9a479e61791a..3cd31e0d4bd9 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -450,7 +450,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
450 count = disc->ops->receive_buf2(tty, p, f, count); 450 count = disc->ops->receive_buf2(tty, p, f, count);
451 else { 451 else {
452 count = min_t(int, count, tty->receive_room); 452 count = min_t(int, count, tty->receive_room);
453 if (count) 453 if (count && disc->ops->receive_buf)
454 disc->ops->receive_buf(tty, p, f, count); 454 disc->ops->receive_buf(tty, p, f, count);
455 } 455 }
456 return count; 456 return count;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index b30e7423549b..26ca4f910cb0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1838,6 +1838,11 @@ static const struct usb_device_id acm_ids[] = {
1838 }, 1838 },
1839#endif 1839#endif
1840 1840
1841 /* Exclude Infineon Flash Loader utility */
1842 { USB_DEVICE(0x058b, 0x0041),
1843 .driver_info = IGNORE_DEVICE,
1844 },
1845
1841 /* control interfaces without any protocol set */ 1846 /* control interfaces without any protocol set */
1842 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1847 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1843 USB_CDC_PROTO_NONE) }, 1848 USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7caff020106e..5050760f5e17 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
115 USB_SS_MULT(desc->bmAttributes) > 3) { 115 USB_SS_MULT(desc->bmAttributes) > 3) {
116 dev_warn(ddev, "Isoc endpoint has Mult of %d in " 116 dev_warn(ddev, "Isoc endpoint has Mult of %d in "
117 "config %d interface %d altsetting %d ep %d: " 117 "config %d interface %d altsetting %d ep %d: "
118 "setting to 3\n", desc->bmAttributes + 1, 118 "setting to 3\n",
119 USB_SS_MULT(desc->bmAttributes),
119 cfgno, inum, asnum, ep->desc.bEndpointAddress); 120 cfgno, inum, asnum, ep->desc.bEndpointAddress);
120 ep->ss_ep_comp.bmAttributes = 2; 121 ep->ss_ep_comp.bmAttributes = 2;
121 } 122 }
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bdeadc112d29..ddbf32d599cb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
124 124
125int usb_device_supports_lpm(struct usb_device *udev) 125int usb_device_supports_lpm(struct usb_device *udev)
126{ 126{
127 /* Some devices have trouble with LPM */
128 if (udev->quirks & USB_QUIRK_NO_LPM)
129 return 0;
130
127 /* USB 2.1 (and greater) devices indicate LPM support through 131 /* USB 2.1 (and greater) devices indicate LPM support through
128 * their USB 2.0 Extended Capabilities BOS descriptor. 132 * their USB 2.0 Extended Capabilities BOS descriptor.
129 */ 133 */
@@ -1031,10 +1035,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1031 unsigned delay; 1035 unsigned delay;
1032 1036
1033 /* Continue a partial initialization */ 1037 /* Continue a partial initialization */
1034 if (type == HUB_INIT2) 1038 if (type == HUB_INIT2 || type == HUB_INIT3) {
1035 goto init2; 1039 device_lock(hub->intfdev);
1036 if (type == HUB_INIT3) 1040
1041 /* Was the hub disconnected while we were waiting? */
1042 if (hub->disconnected) {
1043 device_unlock(hub->intfdev);
1044 kref_put(&hub->kref, hub_release);
1045 return;
1046 }
1047 if (type == HUB_INIT2)
1048 goto init2;
1037 goto init3; 1049 goto init3;
1050 }
1051 kref_get(&hub->kref);
1038 1052
1039 /* The superspeed hub except for root hub has to use Hub Depth 1053 /* The superspeed hub except for root hub has to use Hub Depth
1040 * value as an offset into the route string to locate the bits 1054 * value as an offset into the route string to locate the bits
@@ -1232,6 +1246,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1232 queue_delayed_work(system_power_efficient_wq, 1246 queue_delayed_work(system_power_efficient_wq,
1233 &hub->init_work, 1247 &hub->init_work,
1234 msecs_to_jiffies(delay)); 1248 msecs_to_jiffies(delay));
1249 device_unlock(hub->intfdev);
1235 return; /* Continues at init3: below */ 1250 return; /* Continues at init3: below */
1236 } else { 1251 } else {
1237 msleep(delay); 1252 msleep(delay);
@@ -1253,6 +1268,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1253 /* Allow autosuspend if it was suppressed */ 1268 /* Allow autosuspend if it was suppressed */
1254 if (type <= HUB_INIT3) 1269 if (type <= HUB_INIT3)
1255 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); 1270 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
1271
1272 if (type == HUB_INIT2 || type == HUB_INIT3)
1273 device_unlock(hub->intfdev);
1274
1275 kref_put(&hub->kref, hub_release);
1256} 1276}
1257 1277
1258/* Implement the continuations for the delays above */ 1278/* Implement the continuations for the delays above */
@@ -4512,6 +4532,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
4512 goto fail; 4532 goto fail;
4513 } 4533 }
4514 4534
4535 usb_detect_quirks(udev);
4536
4515 if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { 4537 if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
4516 retval = usb_get_bos_descriptor(udev); 4538 retval = usb_get_bos_descriptor(udev);
4517 if (!retval) { 4539 if (!retval) {
@@ -4710,7 +4732,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4710 if (status < 0) 4732 if (status < 0)
4711 goto loop; 4733 goto loop;
4712 4734
4713 usb_detect_quirks(udev);
4714 if (udev->quirks & USB_QUIRK_DELAY_INIT) 4735 if (udev->quirks & USB_QUIRK_DELAY_INIT)
4715 msleep(1000); 4736 msleep(1000);
4716 4737
@@ -5326,9 +5347,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5326 if (udev->usb2_hw_lpm_enabled == 1) 5347 if (udev->usb2_hw_lpm_enabled == 1)
5327 usb_set_usb2_hardware_lpm(udev, 0); 5348 usb_set_usb2_hardware_lpm(udev, 0);
5328 5349
5329 bos = udev->bos;
5330 udev->bos = NULL;
5331
5332 /* Disable LPM and LTM while we reset the device and reinstall the alt 5350 /* Disable LPM and LTM while we reset the device and reinstall the alt
5333 * settings. Device-initiated LPM settings, and system exit latency 5351 * settings. Device-initiated LPM settings, and system exit latency
5334 * settings are cleared when the device is reset, so we have to set 5352 * settings are cleared when the device is reset, so we have to set
@@ -5337,15 +5355,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5337 ret = usb_unlocked_disable_lpm(udev); 5355 ret = usb_unlocked_disable_lpm(udev);
5338 if (ret) { 5356 if (ret) {
5339 dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__); 5357 dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
5340 goto re_enumerate; 5358 goto re_enumerate_no_bos;
5341 } 5359 }
5342 ret = usb_disable_ltm(udev); 5360 ret = usb_disable_ltm(udev);
5343 if (ret) { 5361 if (ret) {
5344 dev_err(&udev->dev, "%s Failed to disable LTM\n.", 5362 dev_err(&udev->dev, "%s Failed to disable LTM\n.",
5345 __func__); 5363 __func__);
5346 goto re_enumerate; 5364 goto re_enumerate_no_bos;
5347 } 5365 }
5348 5366
5367 bos = udev->bos;
5368 udev->bos = NULL;
5369
5349 for (i = 0; i < SET_CONFIG_TRIES; ++i) { 5370 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
5350 5371
5351 /* ep0 maxpacket size may change; let the HCD know about it. 5372 /* ep0 maxpacket size may change; let the HCD know about it.
@@ -5442,10 +5463,11 @@ done:
5442 return 0; 5463 return 0;
5443 5464
5444re_enumerate: 5465re_enumerate:
5445 /* LPM state doesn't matter when we're about to destroy the device. */
5446 hub_port_logical_disconnect(parent_hub, port1);
5447 usb_release_bos_descriptor(udev); 5466 usb_release_bos_descriptor(udev);
5448 udev->bos = bos; 5467 udev->bos = bos;
5468re_enumerate_no_bos:
5469 /* LPM state doesn't matter when we're about to destroy the device. */
5470 hub_port_logical_disconnect(parent_hub, port1);
5449 return -ENODEV; 5471 return -ENODEV;
5450} 5472}
5451 5473
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 210618319f10..5487fe308f01 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -206,7 +206,7 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
206 else 206 else
207 method = "default"; 207 method = "default";
208 208
209 pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n", 209 pr_debug("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
210 dev_name(&left->dev), dev_name(&right->dev), method, 210 dev_name(&left->dev), dev_name(&right->dev), method,
211 dev_name(&left->dev), 211 dev_name(&left->dev),
212 lpeer ? dev_name(&lpeer->dev) : "none", 212 lpeer ? dev_name(&lpeer->dev) : "none",
@@ -265,7 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
265 if (rc == 0) { 265 if (rc == 0) {
266 dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev)); 266 dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev));
267 } else { 267 } else {
268 dev_warn(&left->dev, "failed to peer to %s (%d)\n", 268 dev_dbg(&left->dev, "failed to peer to %s (%d)\n",
269 dev_name(&right->dev), rc); 269 dev_name(&right->dev), rc);
270 pr_warn_once("usb: port power management may be unreliable\n"); 270 pr_warn_once("usb: port power management may be unreliable\n");
271 usb_port_block_power_off = 1; 271 usb_port_block_power_off = 1;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5a381945db2..6dc810bce295 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -125,6 +125,9 @@ static const struct usb_device_id usb_quirk_list[] = {
125 { USB_DEVICE(0x04f3, 0x016f), .driver_info = 125 { USB_DEVICE(0x04f3, 0x016f), .driver_info =
126 USB_QUIRK_DEVICE_QUALIFIER }, 126 USB_QUIRK_DEVICE_QUALIFIER },
127 127
128 { USB_DEVICE(0x04f3, 0x21b8), .driver_info =
129 USB_QUIRK_DEVICE_QUALIFIER },
130
128 /* Roland SC-8820 */ 131 /* Roland SC-8820 */
129 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 132 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
130 133
@@ -199,6 +202,12 @@ static const struct usb_device_id usb_quirk_list[] = {
199 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 202 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
200 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 203 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
201 204
205 /* Blackmagic Design Intensity Shuttle */
206 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
207
208 /* Blackmagic Design UltraStudio SDI */
209 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
210
202 { } /* terminating entry must be last */ 211 { } /* terminating entry must be last */
203}; 212};
204 213
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index e61d773cf65e..39c1cbf0e75d 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -125,9 +125,11 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
125 if (ret) 125 if (ret)
126 return ret; 126 return ret;
127 127
128 ret = clk_prepare_enable(hsotg->clk); 128 if (hsotg->clk) {
129 if (ret) 129 ret = clk_prepare_enable(hsotg->clk);
130 return ret; 130 if (ret)
131 return ret;
132 }
131 133
132 if (hsotg->uphy) 134 if (hsotg->uphy)
133 ret = usb_phy_init(hsotg->uphy); 135 ret = usb_phy_init(hsotg->uphy);
@@ -175,7 +177,8 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
175 if (ret) 177 if (ret)
176 return ret; 178 return ret;
177 179
178 clk_disable_unprepare(hsotg->clk); 180 if (hsotg->clk)
181 clk_disable_unprepare(hsotg->clk);
179 182
180 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), 183 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
181 hsotg->supplies); 184 hsotg->supplies);
@@ -212,14 +215,41 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
212 */ 215 */
213 hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy"); 216 hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy");
214 if (IS_ERR(hsotg->phy)) { 217 if (IS_ERR(hsotg->phy)) {
215 hsotg->phy = NULL; 218 ret = PTR_ERR(hsotg->phy);
219 switch (ret) {
220 case -ENODEV:
221 case -ENOSYS:
222 hsotg->phy = NULL;
223 break;
224 case -EPROBE_DEFER:
225 return ret;
226 default:
227 dev_err(hsotg->dev, "error getting phy %d\n", ret);
228 return ret;
229 }
230 }
231
232 if (!hsotg->phy) {
216 hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2); 233 hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2);
217 if (IS_ERR(hsotg->uphy)) 234 if (IS_ERR(hsotg->uphy)) {
218 hsotg->uphy = NULL; 235 ret = PTR_ERR(hsotg->uphy);
219 else 236 switch (ret) {
220 hsotg->plat = dev_get_platdata(hsotg->dev); 237 case -ENODEV:
238 case -ENXIO:
239 hsotg->uphy = NULL;
240 break;
241 case -EPROBE_DEFER:
242 return ret;
243 default:
244 dev_err(hsotg->dev, "error getting usb phy %d\n",
245 ret);
246 return ret;
247 }
248 }
221 } 249 }
222 250
251 hsotg->plat = dev_get_platdata(hsotg->dev);
252
223 if (hsotg->phy) { 253 if (hsotg->phy) {
224 /* 254 /*
225 * If using the generic PHY framework, check if the PHY bus 255 * If using the generic PHY framework, check if the PHY bus
@@ -229,11 +259,6 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
229 hsotg->phyif = GUSBCFG_PHYIF8; 259 hsotg->phyif = GUSBCFG_PHYIF8;
230 } 260 }
231 261
232 if (!hsotg->phy && !hsotg->uphy && !hsotg->plat) {
233 dev_err(hsotg->dev, "no platform data or transceiver defined\n");
234 return -EPROBE_DEFER;
235 }
236
237 /* Clock */ 262 /* Clock */
238 hsotg->clk = devm_clk_get(hsotg->dev, "otg"); 263 hsotg->clk = devm_clk_get(hsotg->dev, "otg");
239 if (IS_ERR(hsotg->clk)) { 264 if (IS_ERR(hsotg->clk)) {
@@ -342,20 +367,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
342 if (retval) 367 if (retval)
343 return retval; 368 return retval;
344 369
345 irq = platform_get_irq(dev, 0);
346 if (irq < 0) {
347 dev_err(&dev->dev, "missing IRQ resource\n");
348 return irq;
349 }
350
351 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
352 irq);
353 retval = devm_request_irq(hsotg->dev, irq,
354 dwc2_handle_common_intr, IRQF_SHARED,
355 dev_name(hsotg->dev), hsotg);
356 if (retval)
357 return retval;
358
359 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 370 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
360 hsotg->regs = devm_ioremap_resource(&dev->dev, res); 371 hsotg->regs = devm_ioremap_resource(&dev->dev, res);
361 if (IS_ERR(hsotg->regs)) 372 if (IS_ERR(hsotg->regs))
@@ -390,6 +401,20 @@ static int dwc2_driver_probe(struct platform_device *dev)
390 401
391 dwc2_set_all_params(hsotg->core_params, -1); 402 dwc2_set_all_params(hsotg->core_params, -1);
392 403
404 irq = platform_get_irq(dev, 0);
405 if (irq < 0) {
406 dev_err(&dev->dev, "missing IRQ resource\n");
407 return irq;
408 }
409
410 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
411 irq);
412 retval = devm_request_irq(hsotg->dev, irq,
413 dwc2_handle_common_intr, IRQF_SHARED,
414 dev_name(hsotg->dev), hsotg);
415 if (retval)
416 return retval;
417
393 retval = dwc2_lowlevel_hw_enable(hsotg); 418 retval = dwc2_lowlevel_hw_enable(hsotg);
394 if (retval) 419 if (retval)
395 return retval; 420 return retval;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e24a01cc98df..a58376fd65fe 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1078,6 +1078,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1078 * little bit faster. 1078 * little bit faster.
1079 */ 1079 */
1080 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1080 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1081 !usb_endpoint_xfer_int(dep->endpoint.desc) &&
1081 !(dep->flags & DWC3_EP_BUSY)) { 1082 !(dep->flags & DWC3_EP_BUSY)) {
1082 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1083 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1083 goto out; 1084 goto out;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index adc6d52efa46..cf43e9e18368 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -423,7 +423,7 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
423 spin_unlock_irq(&ffs->ev.waitq.lock); 423 spin_unlock_irq(&ffs->ev.waitq.lock);
424 mutex_unlock(&ffs->mutex); 424 mutex_unlock(&ffs->mutex);
425 425
426 return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size; 426 return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
427} 427}
428 428
429static ssize_t ffs_ep0_read(struct file *file, char __user *buf, 429static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
@@ -513,7 +513,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
513 513
514 /* unlocks spinlock */ 514 /* unlocks spinlock */
515 ret = __ffs_ep0_queue_wait(ffs, data, len); 515 ret = __ffs_ep0_queue_wait(ffs, data, len);
516 if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len))) 516 if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
517 ret = -EFAULT; 517 ret = -EFAULT;
518 goto done_mutex; 518 goto done_mutex;
519 519
@@ -3493,7 +3493,7 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
3493 if (unlikely(!data)) 3493 if (unlikely(!data))
3494 return ERR_PTR(-ENOMEM); 3494 return ERR_PTR(-ENOMEM);
3495 3495
3496 if (unlikely(__copy_from_user(data, buf, len))) { 3496 if (unlikely(copy_from_user(data, buf, len))) {
3497 kfree(data); 3497 kfree(data);
3498 return ERR_PTR(-EFAULT); 3498 return ERR_PTR(-EFAULT);
3499 } 3499 }
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 42acb45e1ab4..898a570319f1 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -370,6 +370,7 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
370 if (err) { 370 if (err) {
371 ERROR(midi, "%s queue req: %d\n", 371 ERROR(midi, "%s queue req: %d\n",
372 midi->out_ep->name, err); 372 midi->out_ep->name, err);
373 free_ep_req(midi->out_ep, req);
373 } 374 }
374 } 375 }
375 376
@@ -545,7 +546,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
545 } 546 }
546 } 547 }
547 548
548 if (req->length > 0) { 549 if (req->length > 0 && ep->enabled) {
549 int err; 550 int err;
550 551
551 err = usb_ep_queue(ep, req, GFP_ATOMIC); 552 err = usb_ep_queue(ep, req, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 289ebca316d3..ad8c9b05572d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -20,7 +20,7 @@
20#define UVC_ATTR(prefix, cname, aname) \ 20#define UVC_ATTR(prefix, cname, aname) \
21static struct configfs_attribute prefix##attr_##cname = { \ 21static struct configfs_attribute prefix##attr_##cname = { \
22 .ca_name = __stringify(aname), \ 22 .ca_name = __stringify(aname), \
23 .ca_mode = S_IRUGO, \ 23 .ca_mode = S_IRUGO | S_IWUGO, \
24 .ca_owner = THIS_MODULE, \ 24 .ca_owner = THIS_MODULE, \
25 .show = prefix##cname##_show, \ 25 .show = prefix##cname##_show, \
26 .store = prefix##cname##_store, \ 26 .store = prefix##cname##_store, \
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 670ac0b12f00..001a3b74a993 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2536,6 +2536,9 @@ static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
2536 udc->pullup_resume = udc->pullup_on; 2536 udc->pullup_resume = udc->pullup_on;
2537 dplus_pullup(udc, 0); 2537 dplus_pullup(udc, 0);
2538 2538
2539 if (udc->driver)
2540 udc->driver->disconnect(&udc->gadget);
2541
2539 return 0; 2542 return 0;
2540} 2543}
2541 2544
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 342ffd140122..8c6e15bd6ff0 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -473,6 +473,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
473 if (!pdata) 473 if (!pdata)
474 return -ENOMEM; 474 return -ENOMEM;
475 475
476 pdev->dev.platform_data = pdata;
477
476 if (!of_property_read_u32(np, "num-ports", &ports)) 478 if (!of_property_read_u32(np, "num-ports", &ports))
477 pdata->ports = ports; 479 pdata->ports = ports;
478 480
@@ -483,6 +485,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
483 */ 485 */
484 if (i >= pdata->ports) { 486 if (i >= pdata->ports) {
485 pdata->vbus_pin[i] = -EINVAL; 487 pdata->vbus_pin[i] = -EINVAL;
488 pdata->overcurrent_pin[i] = -EINVAL;
486 continue; 489 continue;
487 } 490 }
488 491
@@ -513,10 +516,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
513 } 516 }
514 517
515 at91_for_each_port(i) { 518 at91_for_each_port(i) {
516 if (i >= pdata->ports) { 519 if (i >= pdata->ports)
517 pdata->overcurrent_pin[i] = -EINVAL; 520 break;
518 continue;
519 }
520 521
521 pdata->overcurrent_pin[i] = 522 pdata->overcurrent_pin[i] =
522 of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags); 523 of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
@@ -552,8 +553,6 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
552 } 553 }
553 } 554 }
554 555
555 pdev->dev.platform_data = pdata;
556
557 device_init_wakeup(&pdev->dev, 1); 556 device_init_wakeup(&pdev->dev, 1);
558 return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); 557 return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
559} 558}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index dc31c425ce01..9f1c0538b211 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -377,6 +377,10 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
377 if (std->pl_virt == NULL) 377 if (std->pl_virt == NULL)
378 return -ENOMEM; 378 return -ENOMEM;
379 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); 379 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
380 if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
381 kfree(std->pl_virt);
382 return -EFAULT;
383 }
380 384
381 for (p = 0; p < std->num_pointers; p++) { 385 for (p = 0; p < std->num_pointers; p++) {
382 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); 386 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0230965fb78c..f980c239eded 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -733,8 +733,30 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
733 if ((raw_port_status & PORT_RESET) || 733 if ((raw_port_status & PORT_RESET) ||
734 !(raw_port_status & PORT_PE)) 734 !(raw_port_status & PORT_PE))
735 return 0xffffffff; 735 return 0xffffffff;
736 if (time_after_eq(jiffies, 736 /* did port event handler already start resume timing? */
737 bus_state->resume_done[wIndex])) { 737 if (!bus_state->resume_done[wIndex]) {
738 /* If not, maybe we are in a host initated resume? */
739 if (test_bit(wIndex, &bus_state->resuming_ports)) {
740 /* Host initated resume doesn't time the resume
741 * signalling using resume_done[].
742 * It manually sets RESUME state, sleeps 20ms
743 * and sets U0 state. This should probably be
744 * changed, but not right now.
745 */
746 } else {
747 /* port resume was discovered now and here,
748 * start resume timing
749 */
750 unsigned long timeout = jiffies +
751 msecs_to_jiffies(USB_RESUME_TIMEOUT);
752
753 set_bit(wIndex, &bus_state->resuming_ports);
754 bus_state->resume_done[wIndex] = timeout;
755 mod_timer(&hcd->rh_timer, timeout);
756 }
757 /* Has resume been signalled for USB_RESUME_TIME yet? */
758 } else if (time_after_eq(jiffies,
759 bus_state->resume_done[wIndex])) {
738 int time_left; 760 int time_left;
739 761
740 xhci_dbg(xhci, "Resume USB2 port %d\n", 762 xhci_dbg(xhci, "Resume USB2 port %d\n",
@@ -775,13 +797,26 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
775 } else { 797 } else {
776 /* 798 /*
777 * The resume has been signaling for less than 799 * The resume has been signaling for less than
778 * 20ms. Report the port status as SUSPEND, 800 * USB_RESUME_TIME. Report the port status as SUSPEND,
779 * let the usbcore check port status again 801 * let the usbcore check port status again and clear
780 * and clear resume signaling later. 802 * resume signaling later.
781 */ 803 */
782 status |= USB_PORT_STAT_SUSPEND; 804 status |= USB_PORT_STAT_SUSPEND;
783 } 805 }
784 } 806 }
807 /*
808 * Clear stale usb2 resume signalling variables in case port changed
809 * state during resume signalling. For example on error
810 */
811 if ((bus_state->resume_done[wIndex] ||
812 test_bit(wIndex, &bus_state->resuming_ports)) &&
813 (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
814 (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
815 bus_state->resume_done[wIndex] = 0;
816 clear_bit(wIndex, &bus_state->resuming_ports);
817 }
818
819
785 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 && 820 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
786 (raw_port_status & PORT_POWER)) { 821 (raw_port_status & PORT_POWER)) {
787 if (bus_state->suspended_ports & (1 << wIndex)) { 822 if (bus_state->suspended_ports & (1 << wIndex)) {
@@ -1115,6 +1150,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1115 if ((temp & PORT_PE) == 0) 1150 if ((temp & PORT_PE) == 0)
1116 goto error; 1151 goto error;
1117 1152
1153 set_bit(wIndex, &bus_state->resuming_ports);
1118 xhci_set_link_state(xhci, port_array, wIndex, 1154 xhci_set_link_state(xhci, port_array, wIndex,
1119 XDEV_RESUME); 1155 XDEV_RESUME);
1120 spin_unlock_irqrestore(&xhci->lock, flags); 1156 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1122,6 +1158,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1122 spin_lock_irqsave(&xhci->lock, flags); 1158 spin_lock_irqsave(&xhci->lock, flags);
1123 xhci_set_link_state(xhci, port_array, wIndex, 1159 xhci_set_link_state(xhci, port_array, wIndex,
1124 XDEV_U0); 1160 XDEV_U0);
1161 clear_bit(wIndex, &bus_state->resuming_ports);
1125 } 1162 }
1126 bus_state->port_c_suspend |= 1 << wIndex; 1163 bus_state->port_c_suspend |= 1 << wIndex;
1127 1164
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17f6897acde2..c62109091d12 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -188,10 +188,14 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
188 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, 188 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
189 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, 189 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
190 }; 190 };
191 acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); 191 union acpi_object *obj;
192
193 obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
194 NULL);
195 ACPI_FREE(obj);
192} 196}
193#else 197#else
194 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } 198static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
195#endif /* CONFIG_ACPI */ 199#endif /* CONFIG_ACPI */
196 200
197/* called during probe() after chip reset completes */ 201/* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6c5e8133cf87..eeaa6c6bd540 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1583,7 +1583,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
1583 */ 1583 */
1584 bogus_port_status = true; 1584 bogus_port_status = true;
1585 goto cleanup; 1585 goto cleanup;
1586 } else { 1586 } else if (!test_bit(faked_port_index,
1587 &bus_state->resuming_ports)) {
1587 xhci_dbg(xhci, "resume HS port %d\n", port_id); 1588 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1588 bus_state->resume_done[faked_port_index] = jiffies + 1589 bus_state->resume_done[faked_port_index] = jiffies +
1589 msecs_to_jiffies(USB_RESUME_TIMEOUT); 1590 msecs_to_jiffies(USB_RESUME_TIMEOUT);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dfa44d3e8eee..3f912705dcef 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4778,8 +4778,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4778 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4778 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4779 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 4779 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4780 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 4780 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4781 /*
4782 * refer to section 6.2.2: MTT should be 0 for full speed hub,
4783 * but it may be already set to 1 when setup an xHCI virtual
4784 * device, so clear it anyway.
4785 */
4781 if (tt->multi) 4786 if (tt->multi)
4782 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 4787 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4788 else if (hdev->speed == USB_SPEED_FULL)
4789 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
4790
4783 if (xhci->hci_version > 0x95) { 4791 if (xhci->hci_version > 0x95) {
4784 xhci_dbg(xhci, "xHCI version %x needs hub " 4792 xhci_dbg(xhci, "xHCI version %x needs hub "
4785 "TT think time and number of ports\n", 4793 "TT think time and number of ports\n",
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 1f2037bbeb0d..45c83baf675d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -159,7 +159,7 @@ config USB_TI_CPPI_DMA
159 159
160config USB_TI_CPPI41_DMA 160config USB_TI_CPPI41_DMA
161 bool 'TI CPPI 4.1 (AM335x)' 161 bool 'TI CPPI 4.1 (AM335x)'
162 depends on ARCH_OMAP 162 depends on ARCH_OMAP && DMADEVICES
163 select TI_CPPI41 163 select TI_CPPI41
164 164
165config USB_TUSB_OMAP_DMA 165config USB_TUSB_OMAP_DMA
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 18cfc0a361cb..ee9ff7028b92 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2017,7 +2017,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2017 /* We need musb_read/write functions initialized for PM */ 2017 /* We need musb_read/write functions initialized for PM */
2018 pm_runtime_use_autosuspend(musb->controller); 2018 pm_runtime_use_autosuspend(musb->controller);
2019 pm_runtime_set_autosuspend_delay(musb->controller, 200); 2019 pm_runtime_set_autosuspend_delay(musb->controller, 200);
2020 pm_runtime_irq_safe(musb->controller);
2021 pm_runtime_enable(musb->controller); 2020 pm_runtime_enable(musb->controller);
2022 2021
2023 /* The musb_platform_init() call: 2022 /* The musb_platform_init() call:
@@ -2095,6 +2094,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2095#ifndef CONFIG_MUSB_PIO_ONLY 2094#ifndef CONFIG_MUSB_PIO_ONLY
2096 if (!musb->ops->dma_init || !musb->ops->dma_exit) { 2095 if (!musb->ops->dma_init || !musb->ops->dma_exit) {
2097 dev_err(dev, "DMA controller not set\n"); 2096 dev_err(dev, "DMA controller not set\n");
2097 status = -ENODEV;
2098 goto fail2; 2098 goto fail2;
2099 } 2099 }
2100 musb_dma_controller_create = musb->ops->dma_init; 2100 musb_dma_controller_create = musb->ops->dma_init;
@@ -2218,6 +2218,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2218 2218
2219 pm_runtime_put(musb->controller); 2219 pm_runtime_put(musb->controller);
2220 2220
2221 /*
2222 * For why this is currently needed, see commit 3e43a0725637
2223 * ("usb: musb: core: add pm_runtime_irq_safe()")
2224 */
2225 pm_runtime_irq_safe(musb->controller);
2226
2221 return 0; 2227 return 0;
2222 2228
2223fail5: 2229fail5:
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 80eb991c2506..0d19a6d61a71 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1506,7 +1506,6 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
1506{ 1506{
1507 struct msm_otg_platform_data *pdata; 1507 struct msm_otg_platform_data *pdata;
1508 struct extcon_dev *ext_id, *ext_vbus; 1508 struct extcon_dev *ext_id, *ext_vbus;
1509 const struct of_device_id *id;
1510 struct device_node *node = pdev->dev.of_node; 1509 struct device_node *node = pdev->dev.of_node;
1511 struct property *prop; 1510 struct property *prop;
1512 int len, ret, words; 1511 int len, ret, words;
@@ -1518,8 +1517,9 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
1518 1517
1519 motg->pdata = pdata; 1518 motg->pdata = pdata;
1520 1519
1521 id = of_match_device(msm_otg_dt_match, &pdev->dev); 1520 pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
1522 pdata->phy_type = (enum msm_usb_phy_type) id->data; 1521 if (!pdata->phy_type)
1522 return 1;
1523 1523
1524 motg->link_rst = devm_reset_control_get(&pdev->dev, "link"); 1524 motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
1525 if (IS_ERR(motg->link_rst)) 1525 if (IS_ERR(motg->link_rst))
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index b7536af777ab..c2936dc48ca7 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -143,12 +143,17 @@ static const struct mxs_phy_data imx6sx_phy_data = {
143 .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS, 143 .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
144}; 144};
145 145
146static const struct mxs_phy_data imx6ul_phy_data = {
147 .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
148};
149
146static const struct of_device_id mxs_phy_dt_ids[] = { 150static const struct of_device_id mxs_phy_dt_ids[] = {
147 { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, }, 151 { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
148 { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, }, 152 { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
149 { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, }, 153 { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
150 { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, }, 154 { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
151 { .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, }, 155 { .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, },
156 { .compatible = "fsl,imx6ul-usbphy", .data = &imx6ul_phy_data, },
152 { /* sentinel */ } 157 { /* sentinel */ }
153}; 158};
154MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids); 159MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index de4f97d84a82..8f7a78e70975 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -131,7 +131,8 @@ static void __usbhsg_queue_pop(struct usbhsg_uep *uep,
131 struct device *dev = usbhsg_gpriv_to_dev(gpriv); 131 struct device *dev = usbhsg_gpriv_to_dev(gpriv);
132 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); 132 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
133 133
134 dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe)); 134 if (pipe)
135 dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
135 136
136 ureq->req.status = status; 137 ureq->req.status = status;
137 spin_unlock(usbhs_priv_to_lock(priv)); 138 spin_unlock(usbhs_priv_to_lock(priv));
@@ -685,7 +686,13 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
685 struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); 686 struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
686 struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); 687 struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
687 688
688 usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); 689 if (pipe)
690 usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
691
692 /*
693 * To dequeue a request, this driver should call the usbhsg_queue_pop()
694 * even if the pipe is NULL.
695 */
689 usbhsg_queue_pop(uep, ureq, -ECONNRESET); 696 usbhsg_queue_pop(uep, ureq, -ECONNRESET);
690 697
691 return 0; 698 return 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eac7ccaa3c85..7d4f51a32e66 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -132,7 +132,6 @@ static const struct usb_device_id id_table[] = {
132 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 132 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
133 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 133 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
134 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 134 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
135 { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
136 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ 135 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
137 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ 136 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
138 { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ 137 { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index f51a5d52c0ed..ec1b8f2c1183 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
531 * through. Since this has a reasonably high failure rate, we retry 531 * through. Since this has a reasonably high failure rate, we retry
532 * several times. 532 * several times.
533 */ 533 */
534 while (retries--) { 534 while (retries) {
535 retries--;
535 result = usb_control_msg(serial->dev, 536 result = usb_control_msg(serial->dev,
536 usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, 537 usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
537 0x1, 0, NULL, 0, 100); 538 0x1, 0, NULL, 0, 100);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 3658662898fc..a204782ae530 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -53,6 +53,7 @@ DEVICE(funsoft, FUNSOFT_IDS);
53 53
54/* Infineon Flashloader driver */ 54/* Infineon Flashloader driver */
55#define FLASHLOADER_IDS() \ 55#define FLASHLOADER_IDS() \
56 { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
56 { USB_DEVICE(0x8087, 0x0716) } 57 { USB_DEVICE(0x8087, 0x0716) }
57DEVICE(flashloader, FLASHLOADER_IDS); 58DEVICE(flashloader, FLASHLOADER_IDS);
58 59
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index e69151664436..5c66d3f7a6d0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -796,6 +796,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
796 if (devinfo->flags & US_FL_NO_REPORT_OPCODES) 796 if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
797 sdev->no_report_opcodes = 1; 797 sdev->no_report_opcodes = 1;
798 798
799 /* A few buggy USB-ATA bridges don't understand FUA */
800 if (devinfo->flags & US_FL_BROKEN_FUA)
801 sdev->broken_fua = 1;
802
799 scsi_change_queue_depth(sdev, devinfo->qdepth - 2); 803 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
800 return 0; 804 return 0;
801} 805}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 6b2479123de7..7ffe4209067b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1987,7 +1987,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1987 US_FL_IGNORE_RESIDUE ), 1987 US_FL_IGNORE_RESIDUE ),
1988 1988
1989/* Reported by Michael Büsch <m@bues.ch> */ 1989/* Reported by Michael Büsch <m@bues.ch> */
1990UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114, 1990UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
1991 "JMicron", 1991 "JMicron",
1992 "USB to ATA/ATAPI Bridge", 1992 "USB to ATA/ATAPI Bridge",
1993 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1993 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c85ea530085f..ccc113e83d88 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -132,7 +132,7 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
132 "JMicron", 132 "JMicron",
133 "JMS567", 133 "JMS567",
134 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 134 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
135 US_FL_NO_REPORT_OPCODES), 135 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
136 136
137/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 137/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
138UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 138UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index da6e2ce77495..850d86ca685b 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -31,21 +31,6 @@ menuconfig VFIO
31 31
32 If you don't know what to do here, say N. 32 If you don't know what to do here, say N.
33 33
34menuconfig VFIO_NOIOMMU
35 bool "VFIO No-IOMMU support"
36 depends on VFIO
37 help
38 VFIO is built on the ability to isolate devices using the IOMMU.
39 Only with an IOMMU can userspace access to DMA capable devices be
40 considered secure. VFIO No-IOMMU mode enables IOMMU groups for
41 devices without IOMMU backing for the purpose of re-using the VFIO
42 infrastructure in a non-secure mode. Use of this mode will result
43 in an unsupportable kernel and will therefore taint the kernel.
44 Device assignment to virtual machines is also not possible with
45 this mode since there is no IOMMU to provide DMA translation.
46
47 If you don't know what to do here, say N.
48
49source "drivers/vfio/pci/Kconfig" 34source "drivers/vfio/pci/Kconfig"
50source "drivers/vfio/platform/Kconfig" 35source "drivers/vfio/platform/Kconfig"
51source "virt/lib/Kconfig" 36source "virt/lib/Kconfig"
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 32b88bd2c82c..56bf6dbb93db 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -940,13 +940,13 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
940 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) 940 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
941 return -EINVAL; 941 return -EINVAL;
942 942
943 group = vfio_iommu_group_get(&pdev->dev); 943 group = iommu_group_get(&pdev->dev);
944 if (!group) 944 if (!group)
945 return -EINVAL; 945 return -EINVAL;
946 946
947 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); 947 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
948 if (!vdev) { 948 if (!vdev) {
949 vfio_iommu_group_put(group, &pdev->dev); 949 iommu_group_put(group);
950 return -ENOMEM; 950 return -ENOMEM;
951 } 951 }
952 952
@@ -957,7 +957,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
957 957
958 ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); 958 ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
959 if (ret) { 959 if (ret) {
960 vfio_iommu_group_put(group, &pdev->dev); 960 iommu_group_put(group);
961 kfree(vdev); 961 kfree(vdev);
962 return ret; 962 return ret;
963 } 963 }
@@ -993,7 +993,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
993 if (!vdev) 993 if (!vdev)
994 return; 994 return;
995 995
996 vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev); 996 iommu_group_put(pdev->dev.iommu_group);
997 kfree(vdev); 997 kfree(vdev);
998 998
999 if (vfio_pci_is_vga(pdev)) { 999 if (vfio_pci_is_vga(pdev)) {
@@ -1035,7 +1035,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
1035 return PCI_ERS_RESULT_CAN_RECOVER; 1035 return PCI_ERS_RESULT_CAN_RECOVER;
1036} 1036}
1037 1037
1038static struct pci_error_handlers vfio_err_handlers = { 1038static const struct pci_error_handlers vfio_err_handlers = {
1039 .error_detected = vfio_pci_aer_err_detected, 1039 .error_detected = vfio_pci_aer_err_detected,
1040}; 1040};
1041 1041
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index f1625dcfbb23..b1cc3a768784 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -92,7 +92,6 @@ static struct platform_driver vfio_platform_driver = {
92 .remove = vfio_platform_remove, 92 .remove = vfio_platform_remove,
93 .driver = { 93 .driver = {
94 .name = "vfio-platform", 94 .name = "vfio-platform",
95 .owner = THIS_MODULE,
96 }, 95 },
97}; 96};
98 97
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index a1c50d630792..418cdd9ba3f4 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -51,13 +51,10 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
51 51
52static void vfio_platform_get_reset(struct vfio_platform_device *vdev) 52static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
53{ 53{
54 char modname[256];
55
56 vdev->reset = vfio_platform_lookup_reset(vdev->compat, 54 vdev->reset = vfio_platform_lookup_reset(vdev->compat,
57 &vdev->reset_module); 55 &vdev->reset_module);
58 if (!vdev->reset) { 56 if (!vdev->reset) {
59 snprintf(modname, 256, "vfio-reset:%s", vdev->compat); 57 request_module("vfio-reset:%s", vdev->compat);
60 request_module(modname);
61 vdev->reset = vfio_platform_lookup_reset(vdev->compat, 58 vdev->reset = vfio_platform_lookup_reset(vdev->compat,
62 &vdev->reset_module); 59 &vdev->reset_module);
63 } 60 }
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index de632da2e22f..6070b793cbcb 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -62,7 +62,6 @@ struct vfio_container {
62 struct rw_semaphore group_lock; 62 struct rw_semaphore group_lock;
63 struct vfio_iommu_driver *iommu_driver; 63 struct vfio_iommu_driver *iommu_driver;
64 void *iommu_data; 64 void *iommu_data;
65 bool noiommu;
66}; 65};
67 66
68struct vfio_unbound_dev { 67struct vfio_unbound_dev {
@@ -85,7 +84,6 @@ struct vfio_group {
85 struct list_head unbound_list; 84 struct list_head unbound_list;
86 struct mutex unbound_lock; 85 struct mutex unbound_lock;
87 atomic_t opened; 86 atomic_t opened;
88 bool noiommu;
89}; 87};
90 88
91struct vfio_device { 89struct vfio_device {
@@ -97,147 +95,6 @@ struct vfio_device {
97 void *device_data; 95 void *device_data;
98}; 96};
99 97
100#ifdef CONFIG_VFIO_NOIOMMU
101static bool noiommu __read_mostly;
102module_param_named(enable_unsafe_noiommu_support,
103 noiommu, bool, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
105#endif
106
107/*
108 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
109 * and remove functions, any use cases other than acquiring the first
110 * reference for the purpose of calling vfio_add_group_dev() or removing
111 * that symmetric reference after vfio_del_group_dev() should use the raw
112 * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
113 * removes the device from the dummy group and cannot be nested.
114 */
115struct iommu_group *vfio_iommu_group_get(struct device *dev)
116{
117 struct iommu_group *group;
118 int __maybe_unused ret;
119
120 group = iommu_group_get(dev);
121
122#ifdef CONFIG_VFIO_NOIOMMU
123 /*
124 * With noiommu enabled, an IOMMU group will be created for a device
125 * that doesn't already have one and doesn't have an iommu_ops on their
126 * bus. We use iommu_present() again in the main code to detect these
127 * fake groups.
128 */
129 if (group || !noiommu || iommu_present(dev->bus))
130 return group;
131
132 group = iommu_group_alloc();
133 if (IS_ERR(group))
134 return NULL;
135
136 iommu_group_set_name(group, "vfio-noiommu");
137 ret = iommu_group_add_device(group, dev);
138 iommu_group_put(group);
139 if (ret)
140 return NULL;
141
142 /*
143 * Where to taint? At this point we've added an IOMMU group for a
144 * device that is not backed by iommu_ops, therefore any iommu_
145 * callback using iommu_ops can legitimately Oops. So, while we may
146 * be about to give a DMA capable device to a user without IOMMU
147 * protection, which is clearly taint-worthy, let's go ahead and do
148 * it here.
149 */
150 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
151 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
152#endif
153
154 return group;
155}
156EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
157
158void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
159{
160#ifdef CONFIG_VFIO_NOIOMMU
161 if (!iommu_present(dev->bus))
162 iommu_group_remove_device(dev);
163#endif
164
165 iommu_group_put(group);
166}
167EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
168
169#ifdef CONFIG_VFIO_NOIOMMU
170static void *vfio_noiommu_open(unsigned long arg)
171{
172 if (arg != VFIO_NOIOMMU_IOMMU)
173 return ERR_PTR(-EINVAL);
174 if (!capable(CAP_SYS_RAWIO))
175 return ERR_PTR(-EPERM);
176
177 return NULL;
178}
179
180static void vfio_noiommu_release(void *iommu_data)
181{
182}
183
184static long vfio_noiommu_ioctl(void *iommu_data,
185 unsigned int cmd, unsigned long arg)
186{
187 if (cmd == VFIO_CHECK_EXTENSION)
188 return arg == VFIO_NOIOMMU_IOMMU ? 1 : 0;
189
190 return -ENOTTY;
191}
192
193static int vfio_iommu_present(struct device *dev, void *unused)
194{
195 return iommu_present(dev->bus) ? 1 : 0;
196}
197
198static int vfio_noiommu_attach_group(void *iommu_data,
199 struct iommu_group *iommu_group)
200{
201 return iommu_group_for_each_dev(iommu_group, NULL,
202 vfio_iommu_present) ? -EINVAL : 0;
203}
204
205static void vfio_noiommu_detach_group(void *iommu_data,
206 struct iommu_group *iommu_group)
207{
208}
209
210static struct vfio_iommu_driver_ops vfio_noiommu_ops = {
211 .name = "vfio-noiommu",
212 .owner = THIS_MODULE,
213 .open = vfio_noiommu_open,
214 .release = vfio_noiommu_release,
215 .ioctl = vfio_noiommu_ioctl,
216 .attach_group = vfio_noiommu_attach_group,
217 .detach_group = vfio_noiommu_detach_group,
218};
219
220static struct vfio_iommu_driver vfio_noiommu_driver = {
221 .ops = &vfio_noiommu_ops,
222};
223
224/*
225 * Wrap IOMMU drivers, the noiommu driver is the one and only driver for
226 * noiommu groups (and thus containers) and not available for normal groups.
227 */
228#define vfio_for_each_iommu_driver(con, pos) \
229 for (pos = con->noiommu ? &vfio_noiommu_driver : \
230 list_first_entry(&vfio.iommu_drivers_list, \
231 struct vfio_iommu_driver, vfio_next); \
232 (con->noiommu ? pos != NULL : \
233 &pos->vfio_next != &vfio.iommu_drivers_list); \
234 pos = con->noiommu ? NULL : list_next_entry(pos, vfio_next))
235#else
236#define vfio_for_each_iommu_driver(con, pos) \
237 list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next)
238#endif
239
240
241/** 98/**
242 * IOMMU driver registration 99 * IOMMU driver registration
243 */ 100 */
@@ -342,8 +199,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
342/** 199/**
343 * Group objects - create, release, get, put, search 200 * Group objects - create, release, get, put, search
344 */ 201 */
345static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, 202static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
346 bool noiommu)
347{ 203{
348 struct vfio_group *group, *tmp; 204 struct vfio_group *group, *tmp;
349 struct device *dev; 205 struct device *dev;
@@ -361,7 +217,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
361 atomic_set(&group->container_users, 0); 217 atomic_set(&group->container_users, 0);
362 atomic_set(&group->opened, 0); 218 atomic_set(&group->opened, 0);
363 group->iommu_group = iommu_group; 219 group->iommu_group = iommu_group;
364 group->noiommu = noiommu;
365 220
366 group->nb.notifier_call = vfio_iommu_group_notifier; 221 group->nb.notifier_call = vfio_iommu_group_notifier;
367 222
@@ -397,8 +252,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
397 252
398 dev = device_create(vfio.class, NULL, 253 dev = device_create(vfio.class, NULL,
399 MKDEV(MAJOR(vfio.group_devt), minor), 254 MKDEV(MAJOR(vfio.group_devt), minor),
400 group, "%s%d", noiommu ? "noiommu-" : "", 255 group, "%d", iommu_group_id(iommu_group));
401 iommu_group_id(iommu_group));
402 if (IS_ERR(dev)) { 256 if (IS_ERR(dev)) {
403 vfio_free_group_minor(minor); 257 vfio_free_group_minor(minor);
404 vfio_group_unlock_and_free(group); 258 vfio_group_unlock_and_free(group);
@@ -682,7 +536,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
682 return 0; 536 return 0;
683 537
684 /* TODO Prevent device auto probing */ 538 /* TODO Prevent device auto probing */
685 WARN("Device %s added to live group %d!\n", dev_name(dev), 539 WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
686 iommu_group_id(group->iommu_group)); 540 iommu_group_id(group->iommu_group));
687 541
688 return 0; 542 return 0;
@@ -786,8 +640,7 @@ int vfio_add_group_dev(struct device *dev,
786 640
787 group = vfio_group_get_from_iommu(iommu_group); 641 group = vfio_group_get_from_iommu(iommu_group);
788 if (!group) { 642 if (!group) {
789 group = vfio_create_group(iommu_group, 643 group = vfio_create_group(iommu_group);
790 !iommu_present(dev->bus));
791 if (IS_ERR(group)) { 644 if (IS_ERR(group)) {
792 iommu_group_put(iommu_group); 645 iommu_group_put(iommu_group);
793 return PTR_ERR(group); 646 return PTR_ERR(group);
@@ -999,7 +852,8 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
999 */ 852 */
1000 if (!driver) { 853 if (!driver) {
1001 mutex_lock(&vfio.iommu_drivers_lock); 854 mutex_lock(&vfio.iommu_drivers_lock);
1002 vfio_for_each_iommu_driver(container, driver) { 855 list_for_each_entry(driver, &vfio.iommu_drivers_list,
856 vfio_next) {
1003 if (!try_module_get(driver->ops->owner)) 857 if (!try_module_get(driver->ops->owner))
1004 continue; 858 continue;
1005 859
@@ -1068,7 +922,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
1068 } 922 }
1069 923
1070 mutex_lock(&vfio.iommu_drivers_lock); 924 mutex_lock(&vfio.iommu_drivers_lock);
1071 vfio_for_each_iommu_driver(container, driver) { 925 list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
1072 void *data; 926 void *data;
1073 927
1074 if (!try_module_get(driver->ops->owner)) 928 if (!try_module_get(driver->ops->owner))
@@ -1333,9 +1187,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1333 if (atomic_read(&group->container_users)) 1187 if (atomic_read(&group->container_users))
1334 return -EINVAL; 1188 return -EINVAL;
1335 1189
1336 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1337 return -EPERM;
1338
1339 f = fdget(container_fd); 1190 f = fdget(container_fd);
1340 if (!f.file) 1191 if (!f.file)
1341 return -EBADF; 1192 return -EBADF;
@@ -1351,13 +1202,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1351 1202
1352 down_write(&container->group_lock); 1203 down_write(&container->group_lock);
1353 1204
1354 /* Real groups and fake groups cannot mix */
1355 if (!list_empty(&container->group_list) &&
1356 container->noiommu != group->noiommu) {
1357 ret = -EPERM;
1358 goto unlock_out;
1359 }
1360
1361 driver = container->iommu_driver; 1205 driver = container->iommu_driver;
1362 if (driver) { 1206 if (driver) {
1363 ret = driver->ops->attach_group(container->iommu_data, 1207 ret = driver->ops->attach_group(container->iommu_data,
@@ -1367,7 +1211,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1367 } 1211 }
1368 1212
1369 group->container = container; 1213 group->container = container;
1370 container->noiommu = group->noiommu;
1371 list_add(&group->container_next, &container->group_list); 1214 list_add(&group->container_next, &container->group_list);
1372 1215
1373 /* Get a reference on the container and mark a user within the group */ 1216 /* Get a reference on the container and mark a user within the group */
@@ -1398,9 +1241,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1398 !group->container->iommu_driver || !vfio_group_viable(group)) 1241 !group->container->iommu_driver || !vfio_group_viable(group))
1399 return -EINVAL; 1242 return -EINVAL;
1400 1243
1401 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1402 return -EPERM;
1403
1404 device = vfio_device_get_from_name(group, buf); 1244 device = vfio_device_get_from_name(group, buf);
1405 if (!device) 1245 if (!device)
1406 return -ENODEV; 1246 return -ENODEV;
@@ -1443,10 +1283,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1443 1283
1444 fd_install(ret, filep); 1284 fd_install(ret, filep);
1445 1285
1446 if (group->noiommu)
1447 dev_warn(device->dev, "vfio-noiommu device opened by user "
1448 "(%s:%d)\n", current->comm, task_pid_nr(current));
1449
1450 return ret; 1286 return ret;
1451} 1287}
1452 1288
@@ -1535,11 +1371,6 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1535 if (!group) 1371 if (!group)
1536 return -ENODEV; 1372 return -ENODEV;
1537 1373
1538 if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
1539 vfio_group_put(group);
1540 return -EPERM;
1541 }
1542
1543 /* Do we need multiple instances of the group open? Seems not. */ 1374 /* Do we need multiple instances of the group open? Seems not. */
1544 opened = atomic_cmpxchg(&group->opened, 0, 1); 1375 opened = atomic_cmpxchg(&group->opened, 0, 1);
1545 if (opened) { 1376 if (opened) {
@@ -1702,11 +1533,6 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
1702 if (!atomic_inc_not_zero(&group->container_users)) 1533 if (!atomic_inc_not_zero(&group->container_users))
1703 return ERR_PTR(-EINVAL); 1534 return ERR_PTR(-EINVAL);
1704 1535
1705 if (group->noiommu) {
1706 atomic_dec(&group->container_users);
1707 return ERR_PTR(-EPERM);
1708 }
1709
1710 if (!group->container->iommu_driver || 1536 if (!group->container->iommu_driver ||
1711 !vfio_group_viable(group)) { 1537 !vfio_group_viable(group)) {
1712 atomic_dec(&group->container_users); 1538 atomic_dec(&group->container_users);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eec2f11809ff..ad2146a9ab2d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -819,7 +819,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
819 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); 819 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
820 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || 820 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
821 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || 821 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
822 (a.log_guest_addr & (sizeof(u64) - 1))) { 822 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
823 r = -EINVAL; 823 r = -EINVAL;
824 break; 824 break;
825 } 825 }
@@ -1369,7 +1369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1369 /* Grab the next descriptor number they're advertising, and increment 1369 /* Grab the next descriptor number they're advertising, and increment
1370 * the index we've seen. */ 1370 * the index we've seen. */
1371 if (unlikely(__get_user(ring_head, 1371 if (unlikely(__get_user(ring_head,
1372 &vq->avail->ring[last_avail_idx % vq->num]))) { 1372 &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
1373 vq_err(vq, "Failed to read head: idx %d address %p\n", 1373 vq_err(vq, "Failed to read head: idx %d address %p\n",
1374 last_avail_idx, 1374 last_avail_idx,
1375 &vq->avail->ring[last_avail_idx % vq->num]); 1375 &vq->avail->ring[last_avail_idx % vq->num]);
@@ -1489,7 +1489,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1489 u16 old, new; 1489 u16 old, new;
1490 int start; 1490 int start;
1491 1491
1492 start = vq->last_used_idx % vq->num; 1492 start = vq->last_used_idx & (vq->num - 1);
1493 used = vq->used->ring + start; 1493 used = vq->used->ring + start;
1494 if (count == 1) { 1494 if (count == 1) {
1495 if (__put_user(heads[0].id, &used->id)) { 1495 if (__put_user(heads[0].id, &used->id)) {
@@ -1531,7 +1531,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1531{ 1531{
1532 int start, n, r; 1532 int start, n, r;
1533 1533
1534 start = vq->last_used_idx % vq->num; 1534 start = vq->last_used_idx & (vq->num - 1);
1535 n = vq->num - start; 1535 n = vq->num - start;
1536 if (n < count) { 1536 if (n < count) {
1537 r = __vhost_add_used_n(vq, heads, n); 1537 r = __vhost_add_used_n(vq, heads, n);
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index b335c1ae8625..fe00a07c122e 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -479,7 +479,10 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
479 port = FSL_DIU_PORT_DLVDS; 479 port = FSL_DIU_PORT_DLVDS;
480 } 480 }
481 481
482 return diu_ops.valid_monitor_port(port); 482 if (diu_ops.valid_monitor_port)
483 port = diu_ops.valid_monitor_port(port);
484
485 return port;
483} 486}
484 487
485/* 488/*
@@ -1915,6 +1918,14 @@ static int __init fsl_diu_init(void)
1915#else 1918#else
1916 monitor_port = fsl_diu_name_to_port(monitor_string); 1919 monitor_port = fsl_diu_name_to_port(monitor_string);
1917#endif 1920#endif
1921
1922 /*
 1923 * Must verify set_pixel_clock. If it is not implemented on the
 1924 * platform, then there is no platform support for the DIU.
1925 */
1926 if (!diu_ops.set_pixel_clock)
1927 return -ENODEV;
1928
1918 pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); 1929 pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
1919 1930
1920#ifdef CONFIG_NOT_COHERENT_CACHE 1931#ifdef CONFIG_NOT_COHERENT_CACHE
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 99ca268c1cdd..d05a54922ba6 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -275,6 +275,12 @@ const struct omap_video_timings omap_dss_pal_timings = {
275 .vbp = 41, 275 .vbp = 41,
276 276
277 .interlace = true, 277 .interlace = true,
278
279 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
280 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
281 .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
282 .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
283 .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
278}; 284};
279EXPORT_SYMBOL(omap_dss_pal_timings); 285EXPORT_SYMBOL(omap_dss_pal_timings);
280 286
@@ -290,6 +296,12 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
290 .vbp = 31, 296 .vbp = 31,
291 297
292 .interlace = true, 298 .interlace = true,
299
300 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
301 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
302 .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
303 .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
304 .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
293}; 305};
294EXPORT_SYMBOL(omap_dss_ntsc_timings); 306EXPORT_SYMBOL(omap_dss_ntsc_timings);
295 307
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b1877d73fa56..7062bb0975a5 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -412,6 +412,7 @@ static int virtio_init(void)
412static void __exit virtio_exit(void) 412static void __exit virtio_exit(void)
413{ 413{
414 bus_unregister(&virtio_bus); 414 bus_unregister(&virtio_bus);
415 ida_destroy(&virtio_index_ida);
415} 416}
416core_initcall(virtio_init); 417core_initcall(virtio_init);
417module_exit(virtio_exit); 418module_exit(virtio_exit);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 096b857e7b75..ee663c458b20 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -80,6 +80,12 @@ struct vring_virtqueue {
80 /* Last used index we've seen. */ 80 /* Last used index we've seen. */
81 u16 last_used_idx; 81 u16 last_used_idx;
82 82
83 /* Last written value to avail->flags */
84 u16 avail_flags_shadow;
85
86 /* Last written value to avail->idx in guest byte order */
87 u16 avail_idx_shadow;
88
83 /* How to notify other side. FIXME: commonalize hcalls! */ 89 /* How to notify other side. FIXME: commonalize hcalls! */
84 bool (*notify)(struct virtqueue *vq); 90 bool (*notify)(struct virtqueue *vq);
85 91
@@ -109,7 +115,7 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
109 * otherwise virt_to_phys will give us bogus addresses in the 115 * otherwise virt_to_phys will give us bogus addresses in the
110 * virtqueue. 116 * virtqueue.
111 */ 117 */
112 gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); 118 gfp &= ~__GFP_HIGHMEM;
113 119
114 desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); 120 desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
115 if (!desc) 121 if (!desc)
@@ -235,13 +241,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
235 241
236 /* Put entry in available array (but don't update avail->idx until they 242 /* Put entry in available array (but don't update avail->idx until they
237 * do sync). */ 243 * do sync). */
238 avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1); 244 avail = vq->avail_idx_shadow & (vq->vring.num - 1);
239 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); 245 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
240 246
241 /* Descriptors and available array need to be set before we expose the 247 /* Descriptors and available array need to be set before we expose the
242 * new available array entries. */ 248 * new available array entries. */
243 virtio_wmb(vq->weak_barriers); 249 virtio_wmb(vq->weak_barriers);
244 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); 250 vq->avail_idx_shadow++;
251 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
245 vq->num_added++; 252 vq->num_added++;
246 253
247 pr_debug("Added buffer head %i to %p\n", head, vq); 254 pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -354,8 +361,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
354 * event. */ 361 * event. */
355 virtio_mb(vq->weak_barriers); 362 virtio_mb(vq->weak_barriers);
356 363
357 old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added; 364 old = vq->avail_idx_shadow - vq->num_added;
358 new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx); 365 new = vq->avail_idx_shadow;
359 vq->num_added = 0; 366 vq->num_added = 0;
360 367
361#ifdef DEBUG 368#ifdef DEBUG
@@ -510,7 +517,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
510 /* If we expect an interrupt for the next entry, tell host 517 /* If we expect an interrupt for the next entry, tell host
511 * by writing event index and flush out the write before 518 * by writing event index and flush out the write before
512 * the read in the next get_buf call. */ 519 * the read in the next get_buf call. */
513 if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) { 520 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
514 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); 521 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
515 virtio_mb(vq->weak_barriers); 522 virtio_mb(vq->weak_barriers);
516 } 523 }
@@ -537,7 +544,11 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
537{ 544{
538 struct vring_virtqueue *vq = to_vvq(_vq); 545 struct vring_virtqueue *vq = to_vvq(_vq);
539 546
540 vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT); 547 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
548 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
549 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
550 }
551
541} 552}
542EXPORT_SYMBOL_GPL(virtqueue_disable_cb); 553EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
543 554
@@ -565,7 +576,10 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
565 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to 576 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
566 * either clear the flags bit or point the event index at the next 577 * either clear the flags bit or point the event index at the next
567 * entry. Always do both to keep code simple. */ 578 * entry. Always do both to keep code simple. */
568 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); 579 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
580 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
581 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
582 }
569 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); 583 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
570 END_USE(vq); 584 END_USE(vq);
571 return last_used_idx; 585 return last_used_idx;
@@ -633,9 +647,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
633 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to 647 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
634 * either clear the flags bit or point the event index at the next 648 * either clear the flags bit or point the event index at the next
635 * entry. Always do both to keep code simple. */ 649 * entry. Always do both to keep code simple. */
636 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); 650 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
651 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
652 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
653 }
637 /* TODO: tune this threshold */ 654 /* TODO: tune this threshold */
638 bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4; 655 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
639 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); 656 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
640 virtio_mb(vq->weak_barriers); 657 virtio_mb(vq->weak_barriers);
641 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { 658 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
@@ -670,7 +687,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
670 /* detach_buf clears data, so grab it now. */ 687 /* detach_buf clears data, so grab it now. */
671 buf = vq->data[i]; 688 buf = vq->data[i];
672 detach_buf(vq, i); 689 detach_buf(vq, i);
673 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1); 690 vq->avail_idx_shadow--;
691 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
674 END_USE(vq); 692 END_USE(vq);
675 return buf; 693 return buf;
676 } 694 }
@@ -735,6 +753,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
735 vq->weak_barriers = weak_barriers; 753 vq->weak_barriers = weak_barriers;
736 vq->broken = false; 754 vq->broken = false;
737 vq->last_used_idx = 0; 755 vq->last_used_idx = 0;
756 vq->avail_flags_shadow = 0;
757 vq->avail_idx_shadow = 0;
738 vq->num_added = 0; 758 vq->num_added = 0;
739 list_add_tail(&vq->vq.list, &vdev->vqs); 759 list_add_tail(&vq->vq.list, &vdev->vqs);
740#ifdef DEBUG 760#ifdef DEBUG
@@ -746,8 +766,10 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
746 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 766 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
747 767
748 /* No callback? Tell other side not to bother us. */ 768 /* No callback? Tell other side not to bother us. */
749 if (!callback) 769 if (!callback) {
750 vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT); 770 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
771 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
772 }
751 773
752 /* Put everything in free lists. */ 774 /* Put everything in free lists. */
753 vq->free_head = 0; 775 vq->free_head = 0;
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index e3e9e3d46d1b..96a1b8da5371 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
281 281
282static void consume_one_event(unsigned cpu, 282static void consume_one_event(unsigned cpu,
283 struct evtchn_fifo_control_block *control_block, 283 struct evtchn_fifo_control_block *control_block,
284 unsigned priority, unsigned long *ready) 284 unsigned priority, unsigned long *ready,
285 bool drop)
285{ 286{
286 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
287 uint32_t head; 288 uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
313 if (head == 0) 314 if (head == 0)
314 clear_bit(priority, ready); 315 clear_bit(priority, ready);
315 316
316 if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) 317 if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
317 handle_irq_for_port(port); 318 if (unlikely(drop))
319 pr_warn("Dropping pending event for port %u\n", port);
320 else
321 handle_irq_for_port(port);
322 }
318 323
319 q->head[priority] = head; 324 q->head[priority] = head;
320} 325}
321 326
322static void evtchn_fifo_handle_events(unsigned cpu) 327static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
323{ 328{
324 struct evtchn_fifo_control_block *control_block; 329 struct evtchn_fifo_control_block *control_block;
325 unsigned long ready; 330 unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
331 336
332 while (ready) { 337 while (ready) {
333 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); 338 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
334 consume_one_event(cpu, control_block, q, &ready); 339 consume_one_event(cpu, control_block, q, &ready, drop);
335 ready |= xchg(&control_block->ready, 0); 340 ready |= xchg(&control_block->ready, 0);
336 } 341 }
337} 342}
338 343
344static void evtchn_fifo_handle_events(unsigned cpu)
345{
346 __evtchn_fifo_handle_events(cpu, false);
347}
348
339static void evtchn_fifo_resume(void) 349static void evtchn_fifo_resume(void)
340{ 350{
341 unsigned cpu; 351 unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
420 if (!per_cpu(cpu_control_block, cpu)) 430 if (!per_cpu(cpu_control_block, cpu))
421 ret = evtchn_fifo_alloc_control_block(cpu); 431 ret = evtchn_fifo_alloc_control_block(cpu);
422 break; 432 break;
433 case CPU_DEAD:
434 __evtchn_fifo_handle_events(cpu, true);
435 break;
423 default: 436 default:
424 break; 437 break;
425 } 438 }
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 58e38d586f52..4d529f3e40df 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
37 struct xen_pci_sharedinfo *sh_info; 37 struct xen_pci_sharedinfo *sh_info;
38 unsigned long flags; 38 unsigned long flags;
39 struct work_struct op_work; 39 struct work_struct op_work;
40 struct xen_pci_op op;
40}; 41};
41 42
42struct xen_pcibk_dev_data { 43struct xen_pcibk_dev_data {
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index c4a0666de6f5..73dafdc494aa 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
70 enable ? "enable" : "disable"); 70 enable ? "enable" : "disable");
71 71
72 if (enable) { 72 if (enable) {
73 /*
74 * The MSI or MSI-X should not have an IRQ handler. Otherwise
75 * if the guest terminates we BUG_ON in free_msi_irqs.
76 */
77 if (dev->msi_enabled || dev->msix_enabled)
78 goto out;
79
73 rc = request_irq(dev_data->irq, 80 rc = request_irq(dev_data->irq,
74 xen_pcibk_guest_interrupt, IRQF_SHARED, 81 xen_pcibk_guest_interrupt, IRQF_SHARED,
75 dev_data->irq_name, dev); 82 dev_data->irq_name, dev);
@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
144 if (unlikely(verbose_request)) 151 if (unlikely(verbose_request))
145 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); 152 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
146 153
147 status = pci_enable_msi(dev); 154 if (dev->msi_enabled)
155 status = -EALREADY;
156 else if (dev->msix_enabled)
157 status = -ENXIO;
158 else
159 status = pci_enable_msi(dev);
148 160
149 if (status) { 161 if (status) {
150 pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", 162 pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
@@ -173,20 +185,23 @@ static
173int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, 185int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
174 struct pci_dev *dev, struct xen_pci_op *op) 186 struct pci_dev *dev, struct xen_pci_op *op)
175{ 187{
176 struct xen_pcibk_dev_data *dev_data;
177
178 if (unlikely(verbose_request)) 188 if (unlikely(verbose_request))
179 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", 189 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
180 pci_name(dev)); 190 pci_name(dev));
181 pci_disable_msi(dev);
182 191
192 if (dev->msi_enabled) {
193 struct xen_pcibk_dev_data *dev_data;
194
195 pci_disable_msi(dev);
196
197 dev_data = pci_get_drvdata(dev);
198 if (dev_data)
199 dev_data->ack_intr = 1;
200 }
183 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; 201 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
184 if (unlikely(verbose_request)) 202 if (unlikely(verbose_request))
185 printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), 203 printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
186 op->value); 204 op->value);
187 dev_data = pci_get_drvdata(dev);
188 if (dev_data)
189 dev_data->ack_intr = 1;
190 return 0; 205 return 0;
191} 206}
192 207
@@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
197 struct xen_pcibk_dev_data *dev_data; 212 struct xen_pcibk_dev_data *dev_data;
198 int i, result; 213 int i, result;
199 struct msix_entry *entries; 214 struct msix_entry *entries;
215 u16 cmd;
200 216
201 if (unlikely(verbose_request)) 217 if (unlikely(verbose_request))
202 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", 218 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
203 pci_name(dev)); 219 pci_name(dev));
220
204 if (op->value > SH_INFO_MAX_VEC) 221 if (op->value > SH_INFO_MAX_VEC)
205 return -EINVAL; 222 return -EINVAL;
206 223
224 if (dev->msix_enabled)
225 return -EALREADY;
226
227 /*
228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
229 * to access the BARs where the MSI-X entries reside.
230 */
231 pci_read_config_word(dev, PCI_COMMAND, &cmd);
232 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
233 return -ENXIO;
234
207 entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); 235 entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
208 if (entries == NULL) 236 if (entries == NULL)
209 return -ENOMEM; 237 return -ENOMEM;
@@ -245,23 +273,27 @@ static
245int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, 273int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
246 struct pci_dev *dev, struct xen_pci_op *op) 274 struct pci_dev *dev, struct xen_pci_op *op)
247{ 275{
248 struct xen_pcibk_dev_data *dev_data;
249 if (unlikely(verbose_request)) 276 if (unlikely(verbose_request))
250 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", 277 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
251 pci_name(dev)); 278 pci_name(dev));
252 pci_disable_msix(dev);
253 279
280 if (dev->msix_enabled) {
281 struct xen_pcibk_dev_data *dev_data;
282
283 pci_disable_msix(dev);
284
285 dev_data = pci_get_drvdata(dev);
286 if (dev_data)
287 dev_data->ack_intr = 1;
288 }
254 /* 289 /*
255 * SR-IOV devices (which don't have any legacy IRQ) have 290 * SR-IOV devices (which don't have any legacy IRQ) have
256 * an undefined IRQ value of zero. 291 * an undefined IRQ value of zero.
257 */ 292 */
258 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; 293 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
259 if (unlikely(verbose_request)) 294 if (unlikely(verbose_request))
260 printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), 295 printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
261 op->value); 296 pci_name(dev), op->value);
262 dev_data = pci_get_drvdata(dev);
263 if (dev_data)
264 dev_data->ack_intr = 1;
265 return 0; 297 return 0;
266} 298}
267#endif 299#endif
@@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data)
298 container_of(data, struct xen_pcibk_device, op_work); 330 container_of(data, struct xen_pcibk_device, op_work);
299 struct pci_dev *dev; 331 struct pci_dev *dev;
300 struct xen_pcibk_dev_data *dev_data = NULL; 332 struct xen_pcibk_dev_data *dev_data = NULL;
301 struct xen_pci_op *op = &pdev->sh_info->op; 333 struct xen_pci_op *op = &pdev->op;
302 int test_intx = 0; 334 int test_intx = 0;
303 335
336 *op = pdev->sh_info->op;
337 barrier();
304 dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); 338 dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
305 339
306 if (dev == NULL) 340 if (dev == NULL)
@@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data)
342 if ((dev_data->enable_intx != test_intx)) 376 if ((dev_data->enable_intx != test_intx))
343 xen_pcibk_control_isr(dev, 0 /* no reset */); 377 xen_pcibk_control_isr(dev, 0 /* no reset */);
344 } 378 }
379 pdev->sh_info->op.err = op->err;
380 pdev->sh_info->op.value = op->value;
381#ifdef CONFIG_PCI_MSI
382 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
383 unsigned int i;
384
385 for (i = 0; i < op->value; i++)
386 pdev->sh_info->op.msix_entries[i].vector =
387 op->msix_entries[i].vector;
388 }
389#endif
345 /* Tell the driver domain that we're done. */ 390 /* Tell the driver domain that we're done. */
346 wmb(); 391 wmb();
347 clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); 392 clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 98bc345f296e..4843741e703a 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
44 dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); 44 dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
45 45
46 pdev->xdev = xdev; 46 pdev->xdev = xdev;
47 dev_set_drvdata(&xdev->dev, pdev);
48 47
49 mutex_init(&pdev->dev_lock); 48 mutex_init(&pdev->dev_lock);
50 49
@@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
58 kfree(pdev); 57 kfree(pdev);
59 pdev = NULL; 58 pdev = NULL;
60 } 59 }
60
61 dev_set_drvdata(&xdev->dev, pdev);
62
61out: 63out:
62 return pdev; 64 return pdev;
63} 65}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 43bcae852546..ad4eb1024d1f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
726 if (!pending_req) 726 if (!pending_req)
727 return 1; 727 return 1;
728 728
729 ring_req = *RING_GET_REQUEST(ring, rc); 729 RING_COPY_REQUEST(ring, rc, &ring_req);
730 ring->req_cons = ++rc; 730 ring->req_cons = ++rc;
731 731
732 err = prepare_pending_reqs(info, &ring_req, pending_req); 732 err = prepare_pending_reqs(info, &ring_req, pending_req);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 699941e90667..511078586fa1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -451,9 +451,9 @@ void v9fs_evict_inode(struct inode *inode)
451{ 451{
452 struct v9fs_inode *v9inode = V9FS_I(inode); 452 struct v9fs_inode *v9inode = V9FS_I(inode);
453 453
454 truncate_inode_pages_final(inode->i_mapping); 454 truncate_inode_pages_final(&inode->i_data);
455 clear_inode(inode); 455 clear_inode(inode);
456 filemap_fdatawrite(inode->i_mapping); 456 filemap_fdatawrite(&inode->i_data);
457 457
458 v9fs_cache_inode_put_cookie(inode); 458 v9fs_cache_inode_put_cookie(inode);
459 /* clunk the fid stashed in writeback_fid */ 459 /* clunk the fid stashed in writeback_fid */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c25639e907bd..44d4a1e9244e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1523,11 +1523,14 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1523 WARN_ON_ONCE(bdev->bd_holders); 1523 WARN_ON_ONCE(bdev->bd_holders);
1524 sync_blockdev(bdev); 1524 sync_blockdev(bdev);
1525 kill_bdev(bdev); 1525 kill_bdev(bdev);
1526
1527 bdev_write_inode(bdev);
1526 /* 1528 /*
1527 * ->release can cause the queue to disappear, so flush all 1529 * Detaching bdev inode from its wb in __destroy_inode()
1528 * dirty data before. 1530 * is too late: the queue which embeds its bdi (along with
1531 * root wb) can be gone as soon as we put_disk() below.
1529 */ 1532 */
1530 bdev_write_inode(bdev); 1533 inode_detach_wb(bdev->bd_inode);
1531 } 1534 }
1532 if (bdev->bd_contains == bdev) { 1535 if (bdev->bd_contains == bdev) {
1533 if (disk->fops->release) 1536 if (disk->fops->release)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4b89680a1923..c4661db2b72a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -10480,11 +10480,15 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10480 * until transaction commit to do the actual discard. 10480 * until transaction commit to do the actual discard.
10481 */ 10481 */
10482 if (trimming) { 10482 if (trimming) {
10483 WARN_ON(!list_empty(&block_group->bg_list)); 10483 spin_lock(&fs_info->unused_bgs_lock);
10484 spin_lock(&trans->transaction->deleted_bgs_lock); 10484 /*
10485 * A concurrent scrub might have added us to the list
10486 * fs_info->unused_bgs, so use a list_move operation
10487 * to add the block group to the deleted_bgs list.
10488 */
10485 list_move(&block_group->bg_list, 10489 list_move(&block_group->bg_list,
10486 &trans->transaction->deleted_bgs); 10490 &trans->transaction->deleted_bgs);
10487 spin_unlock(&trans->transaction->deleted_bgs_lock); 10491 spin_unlock(&fs_info->unused_bgs_lock);
10488 btrfs_get_block_group(block_group); 10492 btrfs_get_block_group(block_group);
10489 } 10493 }
10490end_trans: 10494end_trans:
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 72e73461c064..0f09526aa7d9 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1291,7 +1291,8 @@ out:
1291 * on error we return an unlocked page and the error value 1291 * on error we return an unlocked page and the error value
1292 * on success we return a locked page and 0 1292 * on success we return a locked page and 0
1293 */ 1293 */
1294static int prepare_uptodate_page(struct page *page, u64 pos, 1294static int prepare_uptodate_page(struct inode *inode,
1295 struct page *page, u64 pos,
1295 bool force_uptodate) 1296 bool force_uptodate)
1296{ 1297{
1297 int ret = 0; 1298 int ret = 0;
@@ -1306,6 +1307,10 @@ static int prepare_uptodate_page(struct page *page, u64 pos,
1306 unlock_page(page); 1307 unlock_page(page);
1307 return -EIO; 1308 return -EIO;
1308 } 1309 }
1310 if (page->mapping != inode->i_mapping) {
1311 unlock_page(page);
1312 return -EAGAIN;
1313 }
1309 } 1314 }
1310 return 0; 1315 return 0;
1311} 1316}
@@ -1324,6 +1329,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
1324 int faili; 1329 int faili;
1325 1330
1326 for (i = 0; i < num_pages; i++) { 1331 for (i = 0; i < num_pages; i++) {
1332again:
1327 pages[i] = find_or_create_page(inode->i_mapping, index + i, 1333 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1328 mask | __GFP_WRITE); 1334 mask | __GFP_WRITE);
1329 if (!pages[i]) { 1335 if (!pages[i]) {
@@ -1333,13 +1339,17 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
1333 } 1339 }
1334 1340
1335 if (i == 0) 1341 if (i == 0)
1336 err = prepare_uptodate_page(pages[i], pos, 1342 err = prepare_uptodate_page(inode, pages[i], pos,
1337 force_uptodate); 1343 force_uptodate);
1338 if (i == num_pages - 1) 1344 if (!err && i == num_pages - 1)
1339 err = prepare_uptodate_page(pages[i], 1345 err = prepare_uptodate_page(inode, pages[i],
1340 pos + write_bytes, false); 1346 pos + write_bytes, false);
1341 if (err) { 1347 if (err) {
1342 page_cache_release(pages[i]); 1348 page_cache_release(pages[i]);
1349 if (err == -EAGAIN) {
1350 err = 0;
1351 goto again;
1352 }
1343 faili = i - 1; 1353 faili = i - 1;
1344 goto fail; 1354 goto fail;
1345 } 1355 }
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 85a1f8621b51..cfe99bec49de 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -891,7 +891,7 @@ out:
891 spin_unlock(&block_group->lock); 891 spin_unlock(&block_group->lock);
892 ret = 0; 892 ret = 0;
893 893
894 btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now", 894 btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
895 block_group->key.objectid); 895 block_group->key.objectid);
896 } 896 }
897 897
@@ -2972,7 +2972,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2972 u64 cont1_bytes, u64 min_bytes) 2972 u64 cont1_bytes, u64 min_bytes)
2973{ 2973{
2974 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2974 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2975 struct btrfs_free_space *entry; 2975 struct btrfs_free_space *entry = NULL;
2976 int ret = -ENOSPC; 2976 int ret = -ENOSPC;
2977 u64 bitmap_offset = offset_to_bitmap(ctl, offset); 2977 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2978 2978
@@ -2983,8 +2983,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2983 * The bitmap that covers offset won't be in the list unless offset 2983 * The bitmap that covers offset won't be in the list unless offset
2984 * is just its start offset. 2984 * is just its start offset.
2985 */ 2985 */
2986 entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 2986 if (!list_empty(bitmaps))
2987 if (entry->offset != bitmap_offset) { 2987 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2988
2989 if (!entry || entry->offset != bitmap_offset) {
2988 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); 2990 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2989 if (entry && list_empty(&entry->list)) 2991 if (entry && list_empty(&entry->list))
2990 list_add(&entry->list, bitmaps); 2992 list_add(&entry->list, bitmaps);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3367a3c6f214..be8eae80ff65 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -274,7 +274,6 @@ loop:
274 cur_trans->num_dirty_bgs = 0; 274 cur_trans->num_dirty_bgs = 0;
275 spin_lock_init(&cur_trans->dirty_bgs_lock); 275 spin_lock_init(&cur_trans->dirty_bgs_lock);
276 INIT_LIST_HEAD(&cur_trans->deleted_bgs); 276 INIT_LIST_HEAD(&cur_trans->deleted_bgs);
277 spin_lock_init(&cur_trans->deleted_bgs_lock);
278 spin_lock_init(&cur_trans->dropped_roots_lock); 277 spin_lock_init(&cur_trans->dropped_roots_lock);
279 list_add_tail(&cur_trans->list, &fs_info->trans_list); 278 list_add_tail(&cur_trans->list, &fs_info->trans_list);
280 extent_io_tree_init(&cur_trans->dirty_pages, 279 extent_io_tree_init(&cur_trans->dirty_pages,
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 0da21ca9b3fb..64c8221b6165 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -77,8 +77,8 @@ struct btrfs_transaction {
77 */ 77 */
78 struct mutex cache_write_mutex; 78 struct mutex cache_write_mutex;
79 spinlock_t dirty_bgs_lock; 79 spinlock_t dirty_bgs_lock;
80 /* Protected by spin lock fs_info->unused_bgs_lock. */
80 struct list_head deleted_bgs; 81 struct list_head deleted_bgs;
81 spinlock_t deleted_bgs_lock;
82 spinlock_t dropped_roots_lock; 82 spinlock_t dropped_roots_lock;
83 struct btrfs_delayed_ref_root delayed_refs; 83 struct btrfs_delayed_ref_root delayed_refs;
84 int aborted; 84 int aborted;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 456452206609..a23399e8e3ab 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3548,12 +3548,11 @@ again:
3548 3548
3549 ret = btrfs_force_chunk_alloc(trans, chunk_root, 3549 ret = btrfs_force_chunk_alloc(trans, chunk_root,
3550 BTRFS_BLOCK_GROUP_DATA); 3550 BTRFS_BLOCK_GROUP_DATA);
3551 btrfs_end_transaction(trans, chunk_root);
3551 if (ret < 0) { 3552 if (ret < 0) {
3552 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3553 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3553 goto error; 3554 goto error;
3554 } 3555 }
3555
3556 btrfs_end_transaction(trans, chunk_root);
3557 chunk_reserved = 1; 3556 chunk_reserved = 1;
3558 } 3557 }
3559 3558
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6b66dd5d1540..a329f5ba35aa 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1831,11 +1831,11 @@ cifs_invalidate_mapping(struct inode *inode)
1831 * @word: long word containing the bit lock 1831 * @word: long word containing the bit lock
1832 */ 1832 */
1833static int 1833static int
1834cifs_wait_bit_killable(struct wait_bit_key *key) 1834cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
1835{ 1835{
1836 if (fatal_signal_pending(current))
1837 return -ERESTARTSYS;
1838 freezable_schedule_unsafe(); 1836 freezable_schedule_unsafe();
1837 if (signal_pending_state(mode, current))
1838 return -ERESTARTSYS;
1839 return 0; 1839 return 0;
1840} 1840}
1841 1841
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1c75a3a07f8f..602e8441bc0f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1175,6 +1175,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1175 if (dio->flags & DIO_LOCKING) 1175 if (dio->flags & DIO_LOCKING)
1176 mutex_unlock(&inode->i_mutex); 1176 mutex_unlock(&inode->i_mutex);
1177 kmem_cache_free(dio_cache, dio); 1177 kmem_cache_free(dio_cache, dio);
1178 retval = 0;
1178 goto out; 1179 goto out;
1179 } 1180 }
1180 1181
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 73c64daa0f55..60f03b78914e 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -592,10 +592,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
592 } 592 }
593 unlock_page(page); 593 unlock_page(page);
594 } 594 }
595 if (PageDirty(page) || PageWriteback(page)) 595 *uptodate = PageUptodate(page);
596 *uptodate = true;
597 else
598 *uptodate = PageUptodate(page);
599 EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate); 596 EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
600 return page; 597 return page;
601 } else { 598 } else {
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index af06830bfc00..1a0835073663 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -389,7 +389,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
389 struct ext4_crypto_ctx *ctx; 389 struct ext4_crypto_ctx *ctx;
390 struct page *ciphertext_page = NULL; 390 struct page *ciphertext_page = NULL;
391 struct bio *bio; 391 struct bio *bio;
392 ext4_lblk_t lblk = ex->ee_block; 392 ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
393 ext4_fsblk_t pblk = ext4_ext_pblock(ex); 393 ext4_fsblk_t pblk = ext4_ext_pblock(ex);
394 unsigned int len = ext4_ext_get_actual_len(ex); 394 unsigned int len = ext4_ext_get_actual_len(ex);
395 int ret, err = 0; 395 int ret, err = 0;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 750063f7a50c..cc7ca4e87144 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -26,6 +26,7 @@
26#include <linux/seqlock.h> 26#include <linux/seqlock.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/timer.h> 28#include <linux/timer.h>
29#include <linux/version.h>
29#include <linux/wait.h> 30#include <linux/wait.h>
30#include <linux/blockgroup_lock.h> 31#include <linux/blockgroup_lock.h>
31#include <linux/percpu_counter.h> 32#include <linux/percpu_counter.h>
@@ -727,19 +728,55 @@ struct move_extent {
727 <= (EXT4_GOOD_OLD_INODE_SIZE + \ 728 <= (EXT4_GOOD_OLD_INODE_SIZE + \
728 (einode)->i_extra_isize)) \ 729 (einode)->i_extra_isize)) \
729 730
731/*
732 * We use an encoding that preserves the times for extra epoch "00":
733 *
734 * extra msb of adjust for signed
735 * epoch 32-bit 32-bit tv_sec to
736 * bits time decoded 64-bit tv_sec 64-bit tv_sec valid time range
737 * 0 0 1 -0x80000000..-0x00000001 0x000000000 1901-12-13..1969-12-31
738 * 0 0 0 0x000000000..0x07fffffff 0x000000000 1970-01-01..2038-01-19
739 * 0 1 1 0x080000000..0x0ffffffff 0x100000000 2038-01-19..2106-02-07
740 * 0 1 0 0x100000000..0x17fffffff 0x100000000 2106-02-07..2174-02-25
741 * 1 0 1 0x180000000..0x1ffffffff 0x200000000 2174-02-25..2242-03-16
742 * 1 0 0 0x200000000..0x27fffffff 0x200000000 2242-03-16..2310-04-04
743 * 1 1 1 0x280000000..0x2ffffffff 0x300000000 2310-04-04..2378-04-22
744 * 1 1 0 0x300000000..0x37fffffff 0x300000000 2378-04-22..2446-05-10
745 *
746 * Note that previous versions of the kernel on 64-bit systems would
747 * incorrectly use extra epoch bits 1,1 for dates between 1901 and
748 * 1970. e2fsck will correct this, assuming that it is run on the
749 * affected filesystem before 2242.
750 */
751
730static inline __le32 ext4_encode_extra_time(struct timespec *time) 752static inline __le32 ext4_encode_extra_time(struct timespec *time)
731{ 753{
732 return cpu_to_le32((sizeof(time->tv_sec) > 4 ? 754 u32 extra = sizeof(time->tv_sec) > 4 ?
733 (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) | 755 ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
734 ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK)); 756 return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
735} 757}
736 758
737static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra) 759static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
738{ 760{
739 if (sizeof(time->tv_sec) > 4) 761 if (unlikely(sizeof(time->tv_sec) > 4 &&
740 time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) 762 (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
741 << 32; 763#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
742 time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS; 764 /* Handle legacy encoding of pre-1970 dates with epoch
765 * bits 1,1. We assume that by kernel version 4.20,
766 * everyone will have run fsck over the affected
767 * filesystems to correct the problem. (This
768 * backwards compatibility may be removed before this
769 * time, at the discretion of the ext4 developers.)
770 */
771 u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
772 if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
773 extra_bits = 0;
774 time->tv_sec += extra_bits << 32;
775#else
776 time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
777#endif
778 }
779 time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
743} 780}
744 781
745#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \ 782#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index abe2401ce405..e8e7af62ac95 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -52,7 +52,7 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook
52 /* Symlink is encrypted */ 52 /* Symlink is encrypted */
53 sd = (struct ext4_encrypted_symlink_data *)caddr; 53 sd = (struct ext4_encrypted_symlink_data *)caddr;
54 cstr.name = sd->encrypted_path; 54 cstr.name = sd->encrypted_path;
55 cstr.len = le32_to_cpu(sd->len); 55 cstr.len = le16_to_cpu(sd->len);
56 if ((cstr.len + 56 if ((cstr.len +
57 sizeof(struct ext4_encrypted_symlink_data) - 1) > 57 sizeof(struct ext4_encrypted_symlink_data) - 1) >
58 max_size) { 58 max_size) {
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 1b57c72f4a00..1420a3c614af 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -358,7 +358,7 @@ static int name##_open(struct inode *inode, struct file *file) \
358 return single_open(file, ext4_seq_##name##_show, PDE_DATA(inode)); \ 358 return single_open(file, ext4_seq_##name##_show, PDE_DATA(inode)); \
359} \ 359} \
360\ 360\
361const struct file_operations ext4_seq_##name##_fops = { \ 361static const struct file_operations ext4_seq_##name##_fops = { \
362 .owner = THIS_MODULE, \ 362 .owner = THIS_MODULE, \
363 .open = name##_open, \ 363 .open = name##_open, \
364 .read = seq_read, \ 364 .read = seq_read, \
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index eae2c11268bc..8e3ee1936c7e 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -549,6 +549,8 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
549 unregister_chrdev_region(cc->cdev->dev, 1); 549 unregister_chrdev_region(cc->cdev->dev, 1);
550 cdev_del(cc->cdev); 550 cdev_del(cc->cdev);
551 } 551 }
552 /* Base reference is now owned by "fud" */
553 fuse_conn_put(&cc->fc);
552 554
553 rc = fuse_dev_release(inode, file); /* puts the base reference */ 555 rc = fuse_dev_release(inode, file); /* puts the base reference */
554 556
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e0faf8f2c868..570ca4053c80 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1049,6 +1049,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1049 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); 1049 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
1050 flush_dcache_page(page); 1050 flush_dcache_page(page);
1051 1051
1052 iov_iter_advance(ii, tmp);
1052 if (!tmp) { 1053 if (!tmp) {
1053 unlock_page(page); 1054 unlock_page(page);
1054 page_cache_release(page); 1055 page_cache_release(page);
@@ -1061,7 +1062,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1061 req->page_descs[req->num_pages].length = tmp; 1062 req->page_descs[req->num_pages].length = tmp;
1062 req->num_pages++; 1063 req->num_pages++;
1063 1064
1064 iov_iter_advance(ii, tmp);
1065 count += tmp; 1065 count += tmp;
1066 pos += tmp; 1066 pos += tmp;
1067 offset += tmp; 1067 offset += tmp;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 89463eee6791..ca181e81c765 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1009,7 +1009,8 @@ out:
1009} 1009}
1010 1010
1011/* Fast check whether buffer is already attached to the required transaction */ 1011/* Fast check whether buffer is already attached to the required transaction */
1012static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh) 1012static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
1013 bool undo)
1013{ 1014{
1014 struct journal_head *jh; 1015 struct journal_head *jh;
1015 bool ret = false; 1016 bool ret = false;
@@ -1036,6 +1037,9 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
1036 jh = READ_ONCE(bh->b_private); 1037 jh = READ_ONCE(bh->b_private);
1037 if (!jh) 1038 if (!jh)
1038 goto out; 1039 goto out;
1040 /* For undo access buffer must have data copied */
1041 if (undo && !jh->b_committed_data)
1042 goto out;
1039 if (jh->b_transaction != handle->h_transaction && 1043 if (jh->b_transaction != handle->h_transaction &&
1040 jh->b_next_transaction != handle->h_transaction) 1044 jh->b_next_transaction != handle->h_transaction)
1041 goto out; 1045 goto out;
@@ -1073,7 +1077,7 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
1073 struct journal_head *jh; 1077 struct journal_head *jh;
1074 int rc; 1078 int rc;
1075 1079
1076 if (jbd2_write_access_granted(handle, bh)) 1080 if (jbd2_write_access_granted(handle, bh, false))
1077 return 0; 1081 return 0;
1078 1082
1079 jh = jbd2_journal_add_journal_head(bh); 1083 jh = jbd2_journal_add_journal_head(bh);
@@ -1210,7 +1214,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
1210 char *committed_data = NULL; 1214 char *committed_data = NULL;
1211 1215
1212 JBUFFER_TRACE(jh, "entry"); 1216 JBUFFER_TRACE(jh, "entry");
1213 if (jbd2_write_access_granted(handle, bh)) 1217 if (jbd2_write_access_granted(handle, bh, true))
1214 return 0; 1218 return 0;
1215 1219
1216 jh = jbd2_journal_add_journal_head(bh); 1220 jh = jbd2_journal_add_journal_head(bh);
@@ -2152,6 +2156,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
2152 2156
2153 if (!buffer_dirty(bh)) { 2157 if (!buffer_dirty(bh)) {
2154 /* bdflush has written it. We can drop it now */ 2158 /* bdflush has written it. We can drop it now */
2159 __jbd2_journal_remove_checkpoint(jh);
2155 goto zap_buffer; 2160 goto zap_buffer;
2156 } 2161 }
2157 2162
@@ -2181,6 +2186,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
2181 /* The orphan record's transaction has 2186 /* The orphan record's transaction has
2182 * committed. We can cleanse this buffer */ 2187 * committed. We can cleanse this buffer */
2183 clear_buffer_jbddirty(bh); 2188 clear_buffer_jbddirty(bh);
2189 __jbd2_journal_remove_checkpoint(jh);
2184 goto zap_buffer; 2190 goto zap_buffer;
2185 } 2191 }
2186 } 2192 }
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index beac58b0e09c..646cdac73488 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -78,8 +78,7 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
78 78
79 p = xdr_inline_decode(xdr, nbytes); 79 p = xdr_inline_decode(xdr, nbytes);
80 if (unlikely(p == NULL)) 80 if (unlikely(p == NULL))
81 printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed " 81 printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
82 "or truncated request.\n");
83 return p; 82 return p;
84} 83}
85 84
@@ -890,7 +889,6 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
890 struct cb_compound_hdr_arg hdr_arg = { 0 }; 889 struct cb_compound_hdr_arg hdr_arg = { 0 };
891 struct cb_compound_hdr_res hdr_res = { NULL }; 890 struct cb_compound_hdr_res hdr_res = { NULL };
892 struct xdr_stream xdr_in, xdr_out; 891 struct xdr_stream xdr_in, xdr_out;
893 struct xdr_buf *rq_arg = &rqstp->rq_arg;
894 __be32 *p, status; 892 __be32 *p, status;
895 struct cb_process_state cps = { 893 struct cb_process_state cps = {
896 .drc_status = 0, 894 .drc_status = 0,
@@ -902,8 +900,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
902 900
903 dprintk("%s: start\n", __func__); 901 dprintk("%s: start\n", __func__);
904 902
905 rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len; 903 xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
906 xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
907 904
908 p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); 905 p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
909 xdr_init_encode(&xdr_out, &rqstp->rq_res, p); 906 xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 31b0a52223a7..c7e8b87da5b2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -75,11 +75,11 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
75 * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks 75 * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks
76 * @word: long word containing the bit lock 76 * @word: long word containing the bit lock
77 */ 77 */
78int nfs_wait_bit_killable(struct wait_bit_key *key) 78int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
79{ 79{
80 if (fatal_signal_pending(current))
81 return -ERESTARTSYS;
82 freezable_schedule_unsafe(); 80 freezable_schedule_unsafe();
81 if (signal_pending_state(mode, current))
82 return -ERESTARTSYS;
83 return 0; 83 return 0;
84} 84}
85EXPORT_SYMBOL_GPL(nfs_wait_bit_killable); 85EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 56cfde26fb9c..9dea85f7f918 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -379,7 +379,7 @@ extern int nfs_drop_inode(struct inode *);
379extern void nfs_clear_inode(struct inode *); 379extern void nfs_clear_inode(struct inode *);
380extern void nfs_evict_inode(struct inode *); 380extern void nfs_evict_inode(struct inode *);
381void nfs_zap_acl_cache(struct inode *inode); 381void nfs_zap_acl_cache(struct inode *inode);
382extern int nfs_wait_bit_killable(struct wait_bit_key *key); 382extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
383 383
384/* super.c */ 384/* super.c */
385extern const struct super_operations nfs_sops; 385extern const struct super_operations nfs_sops;
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 5c0c6b58157f..9aebffb40505 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -476,10 +476,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
476 } 476 }
477 unlock_page(page); 477 unlock_page(page);
478 } 478 }
479 if (PageDirty(page) || PageWriteback(page)) 479 *uptodate = PageUptodate(page);
480 *uptodate = true;
481 else
482 *uptodate = PageUptodate(page);
483 dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate); 480 dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate);
484 return page; 481 return page;
485} 482}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index fe3ddd20ff89..452a011ba0d8 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -129,7 +129,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
129 set_bit(NFS_IO_INPROGRESS, &c->flags); 129 set_bit(NFS_IO_INPROGRESS, &c->flags);
130 if (atomic_read(&c->io_count) == 0) 130 if (atomic_read(&c->io_count) == 0)
131 break; 131 break;
132 ret = nfs_wait_bit_killable(&q.key); 132 ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE);
133 } while (atomic_read(&c->io_count) != 0 && !ret); 133 } while (atomic_read(&c->io_count) != 0 && !ret);
134 finish_wait(wq, &q.wait); 134 finish_wait(wq, &q.wait);
135 return ret; 135 return ret;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 5a8ae2125b50..bec0384499f7 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1466,11 +1466,11 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1466} 1466}
1467 1467
1468/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */ 1468/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
1469static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key) 1469static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode)
1470{ 1470{
1471 if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags)) 1471 if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
1472 return 1; 1472 return 1;
1473 return nfs_wait_bit_killable(key); 1473 return nfs_wait_bit_killable(key, mode);
1474} 1474}
1475 1475
1476static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) 1476static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 9ffef06b30d5..c9d6c715c0fb 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -616,6 +616,7 @@ nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
616 616
617 mutex_lock(&ls->ls_mutex); 617 mutex_lock(&ls->ls_mutex);
618 nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid); 618 nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
619 mutex_unlock(&ls->ls_mutex);
619} 620}
620 621
621static int 622static int
@@ -659,7 +660,6 @@ nfsd4_cb_layout_release(struct nfsd4_callback *cb)
659 660
660 trace_layout_recall_release(&ls->ls_stid.sc_stateid); 661 trace_layout_recall_release(&ls->ls_stid.sc_stateid);
661 662
662 mutex_unlock(&ls->ls_mutex);
663 nfsd4_return_all_layouts(ls, &reaplist); 663 nfsd4_return_all_layouts(ls, &reaplist);
664 nfsd4_free_layouts(&reaplist); 664 nfsd4_free_layouts(&reaplist);
665 nfs4_put_stid(&ls->ls_stid); 665 nfs4_put_stid(&ls->ls_stid);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a03f6f433075..3123408da935 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -367,13 +367,11 @@ static int ocfs2_mknod(struct inode *dir,
367 goto leave; 367 goto leave;
368 } 368 }
369 369
370 status = posix_acl_create(dir, &mode, &default_acl, &acl); 370 status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
371 if (status) { 371 if (status) {
372 mlog_errno(status); 372 mlog_errno(status);
373 goto leave; 373 goto leave;
374 } 374 }
375 /* update inode->i_mode after mask with "umask". */
376 inode->i_mode = mode;
377 375
378 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, 376 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
379 S_ISDIR(mode), 377 S_ISDIR(mode),
diff --git a/fs/proc/base.c b/fs/proc/base.c
index bd3e9e68125b..4bd5d3118acd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2494,6 +2494,7 @@ static ssize_t proc_coredump_filter_write(struct file *file,
2494 mm = get_task_mm(task); 2494 mm = get_task_mm(task);
2495 if (!mm) 2495 if (!mm)
2496 goto out_no_mm; 2496 goto out_no_mm;
2497 ret = 0;
2497 2498
2498 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { 2499 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
2499 if (val & mask) 2500 if (val & mask)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index db284bff29dc..9dbb739cafa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,7 +5,7 @@
5 * Copyright 2001 Red Hat, Inc. 5 * Copyright 2001 Red Hat, Inc.
6 * Based on code from mm/memory.c Copyright Linus Torvalds and others. 6 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
7 * 7 *
8 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index a8e01aaca087..d7162cf1c3e1 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -587,6 +587,13 @@ struct drm_driver {
587 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); 587 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
588 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); 588 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
589 589
590 /**
591 * Hook for allocating the GEM object struct, for use by core
592 * helpers.
593 */
594 struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
595 size_t size);
596
590 /* prime: */ 597 /* prime: */
591 /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ 598 /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
592 int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, 599 int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
@@ -1061,7 +1068,7 @@ void drm_dev_ref(struct drm_device *dev);
1061void drm_dev_unref(struct drm_device *dev); 1068void drm_dev_unref(struct drm_device *dev);
1062int drm_dev_register(struct drm_device *dev, unsigned long flags); 1069int drm_dev_register(struct drm_device *dev, unsigned long flags);
1063void drm_dev_unregister(struct drm_device *dev); 1070void drm_dev_unregister(struct drm_device *dev);
1064int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...); 1071int drm_dev_set_unique(struct drm_device *dev, const char *name);
1065 1072
1066struct drm_minor *drm_minor_acquire(unsigned int minor_id); 1073struct drm_minor *drm_minor_acquire(unsigned int minor_id);
1067void drm_minor_release(struct drm_minor *minor); 1074void drm_minor_release(struct drm_minor *minor);
@@ -1110,6 +1117,7 @@ static inline int drm_pci_set_busid(struct drm_device *dev,
1110#define DRM_PCIE_SPEED_80 4 1117#define DRM_PCIE_SPEED_80 4
1111 1118
1112extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); 1119extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
1120extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
1113 1121
1114/* platform section */ 1122/* platform section */
1115extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); 1123extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d8576ac55693..d3eaa5df187a 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -130,10 +130,6 @@ int __must_check
130drm_atomic_add_affected_planes(struct drm_atomic_state *state, 130drm_atomic_add_affected_planes(struct drm_atomic_state *state,
131 struct drm_crtc *crtc); 131 struct drm_crtc *crtc);
132 132
133int
134drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
135 struct drm_crtc *crtc);
136
137void drm_atomic_legacy_backoff(struct drm_atomic_state *state); 133void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
138 134
139void 135void
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index a286cce98720..89d008dc08e2 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -126,6 +126,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
126void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, 126void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
127 struct drm_plane_state *state); 127 struct drm_plane_state *state);
128 128
129void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
130 struct drm_connector_state *conn_state);
129void drm_atomic_helper_connector_reset(struct drm_connector *connector); 131void drm_atomic_helper_connector_reset(struct drm_connector *connector);
130void 132void
131__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, 133__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 4765df331002..c65a212db77e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -162,23 +162,60 @@ struct drm_tile_group {
162 u8 group_data[8]; 162 u8 group_data[8];
163}; 163};
164 164
165/**
166 * struct drm_framebuffer_funcs - framebuffer hooks
167 */
165struct drm_framebuffer_funcs { 168struct drm_framebuffer_funcs {
166 /* note: use drm_framebuffer_remove() */ 169 /**
170 * @destroy:
171 *
172 * Clean up framebuffer resources, specifically also unreference the
173 * backing storage. The core guarantees to call this function for every
174 * framebuffer successfully created by ->fb_create() in
175 * &drm_mode_config_funcs. Drivers must also call
176 * drm_framebuffer_cleanup() to release DRM core resources for this
177 * framebuffer.
178 */
167 void (*destroy)(struct drm_framebuffer *framebuffer); 179 void (*destroy)(struct drm_framebuffer *framebuffer);
180
181 /**
182 * @create_handle:
183 *
184 * Create a buffer handle in the driver-specific buffer manager (either
185 * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
186 * the core to implement the GETFB IOCTL, which returns (for
187 * sufficiently priviledged user) also a native buffer handle. This can
188 * be used for seamless transitions between modesetting clients by
189 * copying the current screen contents to a private buffer and blending
190 * between that and the new contents.
191 *
192 * GEM based drivers should call drm_gem_handle_create() to create the
193 * handle.
194 *
195 * RETURNS:
196 *
197 * 0 on success or a negative error code on failure.
198 */
168 int (*create_handle)(struct drm_framebuffer *fb, 199 int (*create_handle)(struct drm_framebuffer *fb,
169 struct drm_file *file_priv, 200 struct drm_file *file_priv,
170 unsigned int *handle); 201 unsigned int *handle);
171 /* 202 /**
172 * Optional callback for the dirty fb ioctl. 203 * @dirty:
204 *
205 * Optional callback for the dirty fb IOCTL.
206 *
207 * Userspace can notify the driver via this callback that an area of the
208 * framebuffer has changed and should be flushed to the display
209 * hardware. This can also be used internally, e.g. by the fbdev
210 * emulation, though that's not the case currently.
173 * 211 *
174 * Userspace can notify the driver via this callback 212 * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
175 * that a area of the framebuffer has changed and should 213 * for more information as all the semantics and arguments have a one to
176 * be flushed to the display hardware. 214 * one mapping on this function.
177 * 215 *
178 * See documentation in drm_mode.h for the struct 216 * RETURNS:
179 * drm_mode_fb_dirty_cmd for more information as all 217 *
180 * the semantics and arguments have a one to one mapping 218 * 0 on success or a negative error code on failure.
181 * on this function.
182 */ 219 */
183 int (*dirty)(struct drm_framebuffer *framebuffer, 220 int (*dirty)(struct drm_framebuffer *framebuffer,
184 struct drm_file *file_priv, unsigned flags, 221 struct drm_file *file_priv, unsigned flags,
@@ -254,6 +291,11 @@ struct drm_plane;
254struct drm_bridge; 291struct drm_bridge;
255struct drm_atomic_state; 292struct drm_atomic_state;
256 293
294struct drm_crtc_helper_funcs;
295struct drm_encoder_helper_funcs;
296struct drm_connector_helper_funcs;
297struct drm_plane_helper_funcs;
298
257/** 299/**
258 * struct drm_crtc_state - mutable CRTC state 300 * struct drm_crtc_state - mutable CRTC state
259 * @crtc: backpointer to the CRTC 301 * @crtc: backpointer to the CRTC
@@ -264,6 +306,7 @@ struct drm_atomic_state;
264 * @active_changed: crtc_state->active has been toggled. 306 * @active_changed: crtc_state->active has been toggled.
265 * @connectors_changed: connectors to this crtc have been updated 307 * @connectors_changed: connectors to this crtc have been updated
266 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes 308 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
309 * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
267 * @last_vblank_count: for helpers and drivers to capture the vblank of the 310 * @last_vblank_count: for helpers and drivers to capture the vblank of the
268 * update to ensure framebuffer cleanup isn't done too early 311 * update to ensure framebuffer cleanup isn't done too early
269 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings 312 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
@@ -297,6 +340,8 @@ struct drm_crtc_state {
297 */ 340 */
298 u32 plane_mask; 341 u32 plane_mask;
299 342
343 u32 connector_mask;
344
300 /* last_vblank_count: for vblank waits before cleanup */ 345 /* last_vblank_count: for vblank waits before cleanup */
301 u32 last_vblank_count; 346 u32 last_vblank_count;
302 347
@@ -315,23 +360,6 @@ struct drm_crtc_state {
315 360
316/** 361/**
317 * struct drm_crtc_funcs - control CRTCs for a given device 362 * struct drm_crtc_funcs - control CRTCs for a given device
318 * @save: save CRTC state
319 * @restore: restore CRTC state
320 * @reset: reset CRTC after state has been invalidated (e.g. resume)
321 * @cursor_set: setup the cursor
 322 * @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
323 * @cursor_move: move the cursor
324 * @gamma_set: specify color ramp for CRTC
325 * @destroy: deinit and free object
326 * @set_property: called when a property is changed
327 * @set_config: apply a new CRTC configuration
328 * @page_flip: initiate a page flip
329 * @atomic_duplicate_state: duplicate the atomic state for this CRTC
330 * @atomic_destroy_state: destroy an atomic state for this CRTC
331 * @atomic_set_property: set a property on an atomic state for this CRTC
332 * (do not call directly, use drm_atomic_crtc_set_property())
333 * @atomic_get_property: get a property on an atomic state for this CRTC
334 * (do not call directly, use drm_atomic_crtc_get_property())
335 * 363 *
336 * The drm_crtc_funcs structure is the central CRTC management structure 364 * The drm_crtc_funcs structure is the central CRTC management structure
337 * in the DRM. Each CRTC controls one or more connectors (note that the name 365 * in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -343,54 +371,317 @@ struct drm_crtc_state {
343 * bus accessors. 371 * bus accessors.
344 */ 372 */
345struct drm_crtc_funcs { 373struct drm_crtc_funcs {
346 /* Save CRTC state */ 374 /**
347 void (*save)(struct drm_crtc *crtc); /* suspend? */ 375 * @reset:
348 /* Restore CRTC state */ 376 *
349 void (*restore)(struct drm_crtc *crtc); /* resume? */ 377 * Reset CRTC hardware and software state to off. This function isn't
350 /* Reset CRTC state */ 378 * called by the core directly, only through drm_mode_config_reset().
379 * It's not a helper hook only for historical reasons.
380 *
381 * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset
382 * atomic state using this hook.
383 */
351 void (*reset)(struct drm_crtc *crtc); 384 void (*reset)(struct drm_crtc *crtc);
352 385
353 /* cursor controls */ 386 /**
387 * @cursor_set:
388 *
389 * Update the cursor image. The cursor position is relative to the CRTC
390 * and can be partially or fully outside of the visible area.
391 *
392 * Note that contrary to all other KMS functions the legacy cursor entry
393 * points don't take a framebuffer object, but instead take directly a
394 * raw buffer object id from the driver's buffer manager (which is
395 * either GEM or TTM for current drivers).
396 *
397 * This entry point is deprecated, drivers should instead implement
398 * universal plane support and register a proper cursor plane using
399 * drm_crtc_init_with_planes().
400 *
401 * This callback is optional
402 *
403 * RETURNS:
404 *
405 * 0 on success or a negative error code on failure.
406 */
354 int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, 407 int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
355 uint32_t handle, uint32_t width, uint32_t height); 408 uint32_t handle, uint32_t width, uint32_t height);
409
410 /**
411 * @cursor_set2:
412 *
413 * Update the cursor image, including hotspot information. The hotspot
414 * must not affect the cursor position in CRTC coordinates, but is only
 415 * meant as a hint for virtualized display hardware to coordinate the
 416 * guest's and host's cursor position. The cursor hotspot is relative to
417 * the cursor image. Otherwise this works exactly like @cursor_set.
418 *
419 * This entry point is deprecated, drivers should instead implement
420 * universal plane support and register a proper cursor plane using
421 * drm_crtc_init_with_planes().
422 *
423 * This callback is optional.
424 *
425 * RETURNS:
426 *
427 * 0 on success or a negative error code on failure.
428 */
356 int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, 429 int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
357 uint32_t handle, uint32_t width, uint32_t height, 430 uint32_t handle, uint32_t width, uint32_t height,
358 int32_t hot_x, int32_t hot_y); 431 int32_t hot_x, int32_t hot_y);
432
433 /**
434 * @cursor_move:
435 *
436 * Update the cursor position. The cursor does not need to be visible
437 * when this hook is called.
438 *
439 * This entry point is deprecated, drivers should instead implement
440 * universal plane support and register a proper cursor plane using
441 * drm_crtc_init_with_planes().
442 *
443 * This callback is optional.
444 *
445 * RETURNS:
446 *
447 * 0 on success or a negative error code on failure.
448 */
359 int (*cursor_move)(struct drm_crtc *crtc, int x, int y); 449 int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
360 450
361 /* Set gamma on the CRTC */ 451 /**
452 * @gamma_set:
453 *
454 * Set gamma on the CRTC.
455 *
456 * This callback is optional.
457 *
458 * NOTE:
459 *
460 * Drivers that support gamma tables and also fbdev emulation through
461 * the provided helper library need to take care to fill out the gamma
 462 * hooks for both. Currently there's a bit of an unfortunate duplication
463 * going on, which should eventually be unified to just one set of
464 * hooks.
465 */
362 void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 466 void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
363 uint32_t start, uint32_t size); 467 uint32_t start, uint32_t size);
364 /* Object destroy routine */ 468
469 /**
470 * @destroy:
471 *
472 * Clean up plane resources. This is only called at driver unload time
473 * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged
474 * in DRM.
475 */
365 void (*destroy)(struct drm_crtc *crtc); 476 void (*destroy)(struct drm_crtc *crtc);
366 477
478 /**
479 * @set_config:
480 *
481 * This is the main legacy entry point to change the modeset state on a
482 * CRTC. All the details of the desired configuration are passed in a
483 * struct &drm_mode_set - see there for details.
484 *
485 * Drivers implementing atomic modeset should use
486 * drm_atomic_helper_set_config() to implement this hook.
487 *
488 * RETURNS:
489 *
490 * 0 on success or a negative error code on failure.
491 */
367 int (*set_config)(struct drm_mode_set *set); 492 int (*set_config)(struct drm_mode_set *set);
368 493
369 /* 494 /**
370 * Flip to the given framebuffer. This implements the page 495 * @page_flip:
371 * flip ioctl described in drm_mode.h, specifically, the 496 *
372 * implementation must return immediately and block all 497 * Legacy entry point to schedule a flip to the given framebuffer.
373 * rendering to the current fb until the flip has completed. 498 *
374 * If userspace set the event flag in the ioctl, the event 499 * Page flipping is a synchronization mechanism that replaces the frame
375 * argument will point to an event to send back when the flip 500 * buffer being scanned out by the CRTC with a new frame buffer during
376 * completes, otherwise it will be NULL. 501 * vertical blanking, avoiding tearing (except when requested otherwise
502 * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application
503 * requests a page flip the DRM core verifies that the new frame buffer
504 * is large enough to be scanned out by the CRTC in the currently
505 * configured mode and then calls the CRTC ->page_flip() operation with a
506 * pointer to the new frame buffer.
507 *
508 * The driver must wait for any pending rendering to the new framebuffer
509 * to complete before executing the flip. It should also wait for any
510 * pending rendering from other drivers if the underlying buffer is a
511 * shared dma-buf.
512 *
513 * An application can request to be notified when the page flip has
514 * completed. The drm core will supply a struct &drm_event in the event
515 * parameter in this case. This can be handled by the
516 * drm_crtc_send_vblank_event() function, which the driver should call on
517 * the provided event upon completion of the flip. Note that if
518 * the driver supports vblank signalling and timestamping the vblank
519 * counters and timestamps must agree with the ones returned from page
520 * flip events. With the current vblank helper infrastructure this can
521 * be achieved by holding a vblank reference while the page flip is
522 * pending, acquired through drm_crtc_vblank_get() and released with
523 * drm_crtc_vblank_put(). Drivers are free to implement their own vblank
524 * counter and timestamp tracking though, e.g. if they have accurate
525 * timestamp registers in hardware.
526 *
527 * FIXME:
528 *
529 * Up to that point drivers need to manage events themselves and can use
 530 * event->base.list freely for that. Specifically they need to ensure
531 * that they don't send out page flip (or vblank) events for which the
532 * corresponding drm file has been closed already. The drm core
533 * unfortunately does not (yet) take care of that. Therefore drivers
534 * currently must clean up and release pending events in their
535 * ->preclose driver function.
536 *
537 * This callback is optional.
538 *
539 * NOTE:
540 *
541 * Very early versions of the KMS ABI mandated that the driver must
542 * block (but not reject) any rendering to the old framebuffer until the
543 * flip operation has completed and the old framebuffer is no longer
544 * visible. This requirement has been lifted, and userspace is instead
545 * expected to request delivery of an event and wait with recycling old
546 * buffers until such has been received.
547 *
548 * RETURNS:
549 *
550 * 0 on success or a negative error code on failure. Note that if a
551 * ->page_flip() operation is already pending the callback should return
552 * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode
553 * or just runtime disabled through DPMS respectively the new atomic
554 * "ACTIVE" state) should result in an -EINVAL error code. Note that
555 * drm_atomic_helper_page_flip() checks this already for atomic drivers.
377 */ 556 */
378 int (*page_flip)(struct drm_crtc *crtc, 557 int (*page_flip)(struct drm_crtc *crtc,
379 struct drm_framebuffer *fb, 558 struct drm_framebuffer *fb,
380 struct drm_pending_vblank_event *event, 559 struct drm_pending_vblank_event *event,
381 uint32_t flags); 560 uint32_t flags);
382 561
562 /**
563 * @set_property:
564 *
565 * This is the legacy entry point to update a property attached to the
566 * CRTC.
567 *
568 * Drivers implementing atomic modeset should use
569 * drm_atomic_helper_crtc_set_property() to implement this hook.
570 *
571 * This callback is optional if the driver does not support any legacy
572 * driver-private properties.
573 *
574 * RETURNS:
575 *
576 * 0 on success or a negative error code on failure.
577 */
383 int (*set_property)(struct drm_crtc *crtc, 578 int (*set_property)(struct drm_crtc *crtc,
384 struct drm_property *property, uint64_t val); 579 struct drm_property *property, uint64_t val);
385 580
386 /* atomic update handling */ 581 /**
582 * @atomic_duplicate_state:
583 *
584 * Duplicate the current atomic state for this CRTC and return it.
 585 * The core and helpers guarantee that any atomic state duplicated with
586 * this hook and still owned by the caller (i.e. not transferred to the
587 * driver by calling ->atomic_commit() from struct
588 * &drm_mode_config_funcs) will be cleaned up by calling the
589 * @atomic_destroy_state hook in this structure.
590 *
591 * Atomic drivers which don't subclass struct &drm_crtc should use
592 * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
593 * state structure to extend it with driver-private state should use
594 * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is
595 * duplicated in a consistent fashion across drivers.
596 *
597 * It is an error to call this hook before crtc->state has been
598 * initialized correctly.
599 *
600 * NOTE:
601 *
602 * If the duplicate state references refcounted resources this hook must
603 * acquire a reference for each of them. The driver must release these
604 * references again in @atomic_destroy_state.
605 *
606 * RETURNS:
607 *
608 * Duplicated atomic state or NULL when the allocation failed.
609 */
387 struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); 610 struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
611
612 /**
613 * @atomic_destroy_state:
614 *
615 * Destroy a state duplicated with @atomic_duplicate_state and release
616 * or unreference all resources it references
617 */
388 void (*atomic_destroy_state)(struct drm_crtc *crtc, 618 void (*atomic_destroy_state)(struct drm_crtc *crtc,
389 struct drm_crtc_state *state); 619 struct drm_crtc_state *state);
620
621 /**
622 * @atomic_set_property:
623 *
624 * Decode a driver-private property value and store the decoded value
625 * into the passed-in state structure. Since the atomic core decodes all
626 * standardized properties (even for extensions beyond the core set of
627 * properties which might not be implemented by all drivers) this
628 * requires drivers to subclass the state structure.
629 *
630 * Such driver-private properties should really only be implemented for
631 * truly hardware/vendor specific state. Instead it is preferred to
632 * standardize atomic extension and decode the properties used to expose
633 * such an extension in the core.
634 *
635 * Do not call this function directly, use
636 * drm_atomic_crtc_set_property() instead.
637 *
638 * This callback is optional if the driver does not support any
639 * driver-private atomic properties.
640 *
641 * NOTE:
642 *
643 * This function is called in the state assembly phase of atomic
644 * modesets, which can be aborted for any reason (including on
645 * userspace's request to just check whether a configuration would be
646 * possible). Drivers MUST NOT touch any persistent state (hardware or
647 * software) or data structures except the passed in @state parameter.
648 *
649 * Also since userspace controls in which order properties are set this
650 * function must not do any input validation (since the state update is
651 * incomplete and hence likely inconsistent). Instead any such input
652 * validation must be done in the various atomic_check callbacks.
653 *
654 * RETURNS:
655 *
656 * 0 if the property has been found, -EINVAL if the property isn't
657 * implemented by the driver (which should never happen, the core only
658 * asks for properties attached to this CRTC). No other validation is
659 * allowed by the driver. The core already checks that the property
660 * value is within the range (integer, valid enum value, ...) the driver
661 * set when registering the property.
662 */
390 int (*atomic_set_property)(struct drm_crtc *crtc, 663 int (*atomic_set_property)(struct drm_crtc *crtc,
391 struct drm_crtc_state *state, 664 struct drm_crtc_state *state,
392 struct drm_property *property, 665 struct drm_property *property,
393 uint64_t val); 666 uint64_t val);
667 /**
668 * @atomic_get_property:
669 *
670 * Reads out the decoded driver-private property. This is used to
671 * implement the GETCRTC IOCTL.
672 *
673 * Do not call this function directly, use
674 * drm_atomic_crtc_get_property() instead.
675 *
676 * This callback is optional if the driver does not support any
677 * driver-private atomic properties.
678 *
679 * RETURNS:
680 *
681 * 0 on success, -EINVAL if the property isn't implemented by the
682 * driver (which should never happen, the core only asks for
683 * properties attached to this CRTC).
684 */
394 int (*atomic_get_property)(struct drm_crtc *crtc, 685 int (*atomic_get_property)(struct drm_crtc *crtc,
395 const struct drm_crtc_state *state, 686 const struct drm_crtc_state *state,
396 struct drm_property *property, 687 struct drm_property *property,
@@ -420,7 +711,7 @@ struct drm_crtc_funcs {
420 * @properties: property tracking for this CRTC 711 * @properties: property tracking for this CRTC
421 * @state: current atomic state for this CRTC 712 * @state: current atomic state for this CRTC
422 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for 713 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
423 * legacy ioctls 714 * legacy IOCTLs
424 * 715 *
425 * Each CRTC may have one or more connectors associated with it. This structure 716 * Each CRTC may have one or more connectors associated with it. This structure
426 * allows the CRTC to be controlled. 717 * allows the CRTC to be controlled.
@@ -430,6 +721,8 @@ struct drm_crtc {
430 struct device_node *port; 721 struct device_node *port;
431 struct list_head head; 722 struct list_head head;
432 723
724 char *name;
725
433 /* 726 /*
434 * crtc mutex 727 * crtc mutex
435 * 728 *
@@ -467,14 +760,14 @@ struct drm_crtc {
467 uint16_t *gamma_store; 760 uint16_t *gamma_store;
468 761
469 /* if you are using the helper */ 762 /* if you are using the helper */
470 const void *helper_private; 763 const struct drm_crtc_helper_funcs *helper_private;
471 764
472 struct drm_object_properties properties; 765 struct drm_object_properties properties;
473 766
474 struct drm_crtc_state *state; 767 struct drm_crtc_state *state;
475 768
476 /* 769 /*
477 * For legacy crtc ioctls so that atomic drivers can get at the locking 770 * For legacy crtc IOCTLs so that atomic drivers can get at the locking
478 * acquire context. 771 * acquire context.
479 */ 772 */
480 struct drm_modeset_acquire_ctx *acquire_ctx; 773 struct drm_modeset_acquire_ctx *acquire_ctx;
@@ -499,54 +792,239 @@ struct drm_connector_state {
499 792
500/** 793/**
501 * struct drm_connector_funcs - control connectors on a given device 794 * struct drm_connector_funcs - control connectors on a given device
502 * @dpms: set power state
503 * @save: save connector state
504 * @restore: restore connector state
505 * @reset: reset connector after state has been invalidated (e.g. resume)
506 * @detect: is this connector active?
507 * @fill_modes: fill mode list for this connector
508 * @set_property: property for this connector may need an update
509 * @destroy: make object go away
510 * @force: notify the driver that the connector is forced on
511 * @atomic_duplicate_state: duplicate the atomic state for this connector
512 * @atomic_destroy_state: destroy an atomic state for this connector
513 * @atomic_set_property: set a property on an atomic state for this connector
514 * (do not call directly, use drm_atomic_connector_set_property())
515 * @atomic_get_property: get a property on an atomic state for this connector
516 * (do not call directly, use drm_atomic_connector_get_property())
517 * 795 *
518 * Each CRTC may have one or more connectors attached to it. The functions 796 * Each CRTC may have one or more connectors attached to it. The functions
519 * below allow the core DRM code to control connectors, enumerate available modes, 797 * below allow the core DRM code to control connectors, enumerate available modes,
520 * etc. 798 * etc.
521 */ 799 */
522struct drm_connector_funcs { 800struct drm_connector_funcs {
801 /**
802 * @dpms:
803 *
804 * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
805 * is exposed as a standard property on the connector, but diverted to
806 * this callback in the drm core. Note that atomic drivers don't
807 * implement the 4 level DPMS support on the connector any more, but
808 * instead only have an on/off "ACTIVE" property on the CRTC object.
809 *
810 * Drivers implementing atomic modeset should use
811 * drm_atomic_helper_connector_dpms() to implement this hook.
812 *
813 * RETURNS:
814 *
815 * 0 on success or a negative error code on failure.
816 */
523 int (*dpms)(struct drm_connector *connector, int mode); 817 int (*dpms)(struct drm_connector *connector, int mode);
524 void (*save)(struct drm_connector *connector); 818
525 void (*restore)(struct drm_connector *connector); 819 /**
820 * @reset:
821 *
822 * Reset connector hardware and software state to off. This function isn't
823 * called by the core directly, only through drm_mode_config_reset().
824 * It's not a helper hook only for historical reasons.
825 *
826 * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
827 * atomic state using this hook.
828 */
526 void (*reset)(struct drm_connector *connector); 829 void (*reset)(struct drm_connector *connector);
527 830
528 /* Check to see if anything is attached to the connector. 831 /**
529 * @force is set to false whilst polling, true when checking the 832 * @detect:
530 * connector due to user request. @force can be used by the driver 833 *
531 * to avoid expensive, destructive operations during automated 834 * Check to see if anything is attached to the connector. The parameter
532 * probing. 835 * force is set to false whilst polling, true when checking the
836 * connector due to a user request. force can be used by the driver to
837 * avoid expensive, destructive operations during automated probing.
838 *
839 * FIXME:
840 *
841 * Note that this hook is only called by the probe helper. It's not in
842 * the helper library vtable purely for historical reasons. The only DRM
843 * core entry point to probe connector state is @fill_modes.
844 *
845 * RETURNS:
846 *
847 * drm_connector_status indicating the connector's status.
533 */ 848 */
534 enum drm_connector_status (*detect)(struct drm_connector *connector, 849 enum drm_connector_status (*detect)(struct drm_connector *connector,
535 bool force); 850 bool force);
851
852 /**
853 * @force:
854 *
855 * This function is called to update internal encoder state when the
856 * connector is forced to a certain state by userspace, either through
857 * the sysfs interfaces or on the kernel cmdline. In that case the
858 * @detect callback isn't called.
859 *
860 * FIXME:
861 *
862 * Note that this hook is only called by the probe helper. It's not in
863 * the helper library vtable purely for historical reasons. The only DRM
864 * core entry point to probe connector state is @fill_modes.
865 */
866 void (*force)(struct drm_connector *connector);
867
868 /**
869 * @fill_modes:
870 *
871 * Entry point for output detection and basic mode validation. The
872 * driver should reprobe the output if needed (e.g. when hotplug
873 * handling is unreliable), add all detected modes to connector->modes
874 * and filter out any the device can't support in any configuration. It
875 * also needs to filter out any modes wider or higher than the
876 * parameters max_width and max_height indicate.
877 *
878 * The drivers must also prune any modes no longer valid from
879 * connector->modes. Furthermore it must update connector->status and
880 * connector->edid. If no EDID has been received for this output
881 * connector->edid must be NULL.
882 *
883 * Drivers using the probe helpers should use
884 * drm_helper_probe_single_connector_modes() or
885 * drm_helper_probe_single_connector_modes_nomerge() to implement this
886 * function.
887 *
888 * RETURNS:
889 *
890 * The number of modes detected and filled into connector->modes.
891 */
536 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); 892 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
893
894 /**
895 * @set_property:
896 *
897 * This is the legacy entry point to update a property attached to the
898 * connector.
899 *
900 * Drivers implementing atomic modeset should use
901 * drm_atomic_helper_connector_set_property() to implement this hook.
902 *
903 * This callback is optional if the driver does not support any legacy
904 * driver-private properties.
905 *
906 * RETURNS:
907 *
908 * 0 on success or a negative error code on failure.
909 */
537 int (*set_property)(struct drm_connector *connector, struct drm_property *property, 910 int (*set_property)(struct drm_connector *connector, struct drm_property *property,
538 uint64_t val); 911 uint64_t val);
912
913 /**
914 * @destroy:
915 *
916 * Clean up connector resources. This is called at driver unload time
917 * through drm_mode_config_cleanup(). It can also be called at runtime
918 * when a connector is being hot-unplugged for drivers that support
919 * connector hotplugging (e.g. DisplayPort MST).
920 */
539 void (*destroy)(struct drm_connector *connector); 921 void (*destroy)(struct drm_connector *connector);
540 void (*force)(struct drm_connector *connector);
541 922
542 /* atomic update handling */ 923 /**
924 * @atomic_duplicate_state:
925 *
926 * Duplicate the current atomic state for this connector and return it.
 927 * The core and helpers guarantee that any atomic state duplicated with
928 * this hook and still owned by the caller (i.e. not transferred to the
929 * driver by calling ->atomic_commit() from struct
930 * &drm_mode_config_funcs) will be cleaned up by calling the
931 * @atomic_destroy_state hook in this structure.
932 *
933 * Atomic drivers which don't subclass struct &drm_connector_state should use
934 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
935 * state structure to extend it with driver-private state should use
936 * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
937 * duplicated in a consistent fashion across drivers.
938 *
939 * It is an error to call this hook before connector->state has been
940 * initialized correctly.
941 *
942 * NOTE:
943 *
944 * If the duplicate state references refcounted resources this hook must
945 * acquire a reference for each of them. The driver must release these
946 * references again in @atomic_destroy_state.
947 *
948 * RETURNS:
949 *
950 * Duplicated atomic state or NULL when the allocation failed.
951 */
543 struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); 952 struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
953
954 /**
955 * @atomic_destroy_state:
956 *
957 * Destroy a state duplicated with @atomic_duplicate_state and release
958 * or unreference all resources it references
959 */
544 void (*atomic_destroy_state)(struct drm_connector *connector, 960 void (*atomic_destroy_state)(struct drm_connector *connector,
545 struct drm_connector_state *state); 961 struct drm_connector_state *state);
962
963 /**
964 * @atomic_set_property:
965 *
966 * Decode a driver-private property value and store the decoded value
967 * into the passed-in state structure. Since the atomic core decodes all
968 * standardized properties (even for extensions beyond the core set of
969 * properties which might not be implemented by all drivers) this
970 * requires drivers to subclass the state structure.
971 *
972 * Such driver-private properties should really only be implemented for
973 * truly hardware/vendor specific state. Instead it is preferred to
974 * standardize atomic extension and decode the properties used to expose
975 * such an extension in the core.
976 *
977 * Do not call this function directly, use
978 * drm_atomic_connector_set_property() instead.
979 *
980 * This callback is optional if the driver does not support any
981 * driver-private atomic properties.
982 *
983 * NOTE:
984 *
985 * This function is called in the state assembly phase of atomic
986 * modesets, which can be aborted for any reason (including on
987 * userspace's request to just check whether a configuration would be
988 * possible). Drivers MUST NOT touch any persistent state (hardware or
989 * software) or data structures except the passed in @state parameter.
990 *
991 * Also since userspace controls in which order properties are set this
992 * function must not do any input validation (since the state update is
993 * incomplete and hence likely inconsistent). Instead any such input
994 * validation must be done in the various atomic_check callbacks.
995 *
996 * RETURNS:
997 *
998 * 0 if the property has been found, -EINVAL if the property isn't
999 * implemented by the driver (which shouldn't ever happen, the core only
1000 * asks for properties attached to this connector). No other validation
1001 * is allowed by the driver. The core already checks that the property
1002 * value is within the range (integer, valid enum value, ...) the driver
1003 * set when registering the property.
1004 */
546 int (*atomic_set_property)(struct drm_connector *connector, 1005 int (*atomic_set_property)(struct drm_connector *connector,
547 struct drm_connector_state *state, 1006 struct drm_connector_state *state,
548 struct drm_property *property, 1007 struct drm_property *property,
549 uint64_t val); 1008 uint64_t val);
1009
1010 /**
1011 * @atomic_get_property:
1012 *
1013 * Reads out the decoded driver-private property. This is used to
1014 * implement the GETCONNECTOR IOCTL.
1015 *
1016 * Do not call this function directly, use
1017 * drm_atomic_connector_get_property() instead.
1018 *
1019 * This callback is optional if the driver does not support any
1020 * driver-private atomic properties.
1021 *
1022 * RETURNS:
1023 *
1024 * 0 on success, -EINVAL if the property isn't implemented by the
1025 * driver (which shouldn't ever happen, the core only asks for
1026 * properties attached to this connector).
1027 */
550 int (*atomic_get_property)(struct drm_connector *connector, 1028 int (*atomic_get_property)(struct drm_connector *connector,
551 const struct drm_connector_state *state, 1029 const struct drm_connector_state *state,
552 struct drm_property *property, 1030 struct drm_property *property,
@@ -555,13 +1033,26 @@ struct drm_connector_funcs {
555 1033
556/** 1034/**
557 * struct drm_encoder_funcs - encoder controls 1035 * struct drm_encoder_funcs - encoder controls
558 * @reset: reset state (e.g. at init or resume time)
559 * @destroy: cleanup and free associated data
560 * 1036 *
561 * Encoders sit between CRTCs and connectors. 1037 * Encoders sit between CRTCs and connectors.
562 */ 1038 */
563struct drm_encoder_funcs { 1039struct drm_encoder_funcs {
1040 /**
1041 * @reset:
1042 *
1043 * Reset encoder hardware and software state to off. This function isn't
1044 * called by the core directly, only through drm_mode_config_reset().
1045 * It's not a helper hook only for historical reasons.
1046 */
564 void (*reset)(struct drm_encoder *encoder); 1047 void (*reset)(struct drm_encoder *encoder);
1048
1049 /**
1050 * @destroy:
1051 *
1052 * Clean up encoder resources. This is only called at driver unload time
1053 * through drm_mode_config_cleanup() since an encoder cannot be
1054 * hotplugged in DRM.
1055 */
565 void (*destroy)(struct drm_encoder *encoder); 1056 void (*destroy)(struct drm_encoder *encoder);
566}; 1057};
567 1058
@@ -597,7 +1088,7 @@ struct drm_encoder {
597 struct drm_crtc *crtc; 1088 struct drm_crtc *crtc;
598 struct drm_bridge *bridge; 1089 struct drm_bridge *bridge;
599 const struct drm_encoder_funcs *funcs; 1090 const struct drm_encoder_funcs *funcs;
600 const void *helper_private; 1091 const struct drm_encoder_helper_funcs *helper_private;
601}; 1092};
602 1093
603/* should we poll this connector for connects and disconnects */ 1094/* should we poll this connector for connects and disconnects */
@@ -702,7 +1193,7 @@ struct drm_connector {
702 /* requested DPMS state */ 1193 /* requested DPMS state */
703 int dpms; 1194 int dpms;
704 1195
705 const void *helper_private; 1196 const struct drm_connector_helper_funcs *helper_private;
706 1197
707 /* forced on connector */ 1198 /* forced on connector */
708 struct drm_cmdline_mode cmdline_mode; 1199 struct drm_cmdline_mode cmdline_mode;
@@ -782,40 +1273,203 @@ struct drm_plane_state {
782 1273
783/** 1274/**
784 * struct drm_plane_funcs - driver plane control functions 1275 * struct drm_plane_funcs - driver plane control functions
785 * @update_plane: update the plane configuration
786 * @disable_plane: shut down the plane
787 * @destroy: clean up plane resources
788 * @reset: reset plane after state has been invalidated (e.g. resume)
789 * @set_property: called when a property is changed
790 * @atomic_duplicate_state: duplicate the atomic state for this plane
791 * @atomic_destroy_state: destroy an atomic state for this plane
792 * @atomic_set_property: set a property on an atomic state for this plane
793 * (do not call directly, use drm_atomic_plane_set_property())
794 * @atomic_get_property: get a property on an atomic state for this plane
795 * (do not call directly, use drm_atomic_plane_get_property())
796 */ 1276 */
797struct drm_plane_funcs { 1277struct drm_plane_funcs {
1278 /**
1279 * @update_plane:
1280 *
1281 * This is the legacy entry point to enable and configure the plane for
1282 * the given CRTC and framebuffer. It is never called to disable the
 1283 * plane, i.e. the passed-in crtc and fb parameters are never NULL.
1284 *
1285 * The source rectangle in frame buffer memory coordinates is given by
1286 * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
1287 * values). Devices that don't support subpixel plane coordinates can
1288 * ignore the fractional part.
1289 *
1290 * The destination rectangle in CRTC coordinates is given by the
1291 * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
1292 * Devices scale the source rectangle to the destination rectangle. If
1293 * scaling is not supported, and the source rectangle size doesn't match
1294 * the destination rectangle size, the driver must return a
1295 * -<errorname>EINVAL</errorname> error.
1296 *
1297 * Drivers implementing atomic modeset should use
1298 * drm_atomic_helper_update_plane() to implement this hook.
1299 *
1300 * RETURNS:
1301 *
1302 * 0 on success or a negative error code on failure.
1303 */
798 int (*update_plane)(struct drm_plane *plane, 1304 int (*update_plane)(struct drm_plane *plane,
799 struct drm_crtc *crtc, struct drm_framebuffer *fb, 1305 struct drm_crtc *crtc, struct drm_framebuffer *fb,
800 int crtc_x, int crtc_y, 1306 int crtc_x, int crtc_y,
801 unsigned int crtc_w, unsigned int crtc_h, 1307 unsigned int crtc_w, unsigned int crtc_h,
802 uint32_t src_x, uint32_t src_y, 1308 uint32_t src_x, uint32_t src_y,
803 uint32_t src_w, uint32_t src_h); 1309 uint32_t src_w, uint32_t src_h);
1310
1311 /**
1312 * @disable_plane:
1313 *
1314 * This is the legacy entry point to disable the plane. The DRM core
1315 * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
1316 * with the frame buffer ID set to 0. Disabled planes must not be
1317 * processed by the CRTC.
1318 *
1319 * Drivers implementing atomic modeset should use
1320 * drm_atomic_helper_disable_plane() to implement this hook.
1321 *
1322 * RETURNS:
1323 *
1324 * 0 on success or a negative error code on failure.
1325 */
804 int (*disable_plane)(struct drm_plane *plane); 1326 int (*disable_plane)(struct drm_plane *plane);
1327
1328 /**
1329 * @destroy:
1330 *
1331 * Clean up plane resources. This is only called at driver unload time
1332 * through drm_mode_config_cleanup() since a plane cannot be hotplugged
1333 * in DRM.
1334 */
805 void (*destroy)(struct drm_plane *plane); 1335 void (*destroy)(struct drm_plane *plane);
1336
1337 /**
1338 * @reset:
1339 *
1340 * Reset plane hardware and software state to off. This function isn't
1341 * called by the core directly, only through drm_mode_config_reset().
1342 * It's not a helper hook only for historical reasons.
1343 *
1344 * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
1345 * atomic state using this hook.
1346 */
806 void (*reset)(struct drm_plane *plane); 1347 void (*reset)(struct drm_plane *plane);
807 1348
1349 /**
1350 * @set_property:
1351 *
1352 * This is the legacy entry point to update a property attached to the
1353 * plane.
1354 *
1355 * Drivers implementing atomic modeset should use
1356 * drm_atomic_helper_plane_set_property() to implement this hook.
1357 *
1358 * This callback is optional if the driver does not support any legacy
1359 * driver-private properties.
1360 *
1361 * RETURNS:
1362 *
1363 * 0 on success or a negative error code on failure.
1364 */
808 int (*set_property)(struct drm_plane *plane, 1365 int (*set_property)(struct drm_plane *plane,
809 struct drm_property *property, uint64_t val); 1366 struct drm_property *property, uint64_t val);
810 1367
811 /* atomic update handling */ 1368 /**
1369 * @atomic_duplicate_state:
1370 *
1371 * Duplicate the current atomic state for this plane and return it.
 1372 * The core and helpers guarantee that any atomic state duplicated with
1373 * this hook and still owned by the caller (i.e. not transferred to the
1374 * driver by calling ->atomic_commit() from struct
1375 * &drm_mode_config_funcs) will be cleaned up by calling the
1376 * @atomic_destroy_state hook in this structure.
1377 *
1378 * Atomic drivers which don't subclass struct &drm_plane_state should use
1379 * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
1380 * state structure to extend it with driver-private state should use
1381 * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
1382 * duplicated in a consistent fashion across drivers.
1383 *
1384 * It is an error to call this hook before plane->state has been
1385 * initialized correctly.
1386 *
1387 * NOTE:
1388 *
1389 * If the duplicate state references refcounted resources this hook must
1390 * acquire a reference for each of them. The driver must release these
1391 * references again in @atomic_destroy_state.
1392 *
1393 * RETURNS:
1394 *
1395 * Duplicated atomic state or NULL when the allocation failed.
1396 */
812 struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); 1397 struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
1398
1399 /**
1400 * @atomic_destroy_state:
1401 *
1402 * Destroy a state duplicated with @atomic_duplicate_state and release
1403 * or unreference all resources it references
1404 */
813 void (*atomic_destroy_state)(struct drm_plane *plane, 1405 void (*atomic_destroy_state)(struct drm_plane *plane,
814 struct drm_plane_state *state); 1406 struct drm_plane_state *state);
1407
1408 /**
1409 * @atomic_set_property:
1410 *
1411 * Decode a driver-private property value and store the decoded value
1412 * into the passed-in state structure. Since the atomic core decodes all
1413 * standardized properties (even for extensions beyond the core set of
1414 * properties which might not be implemented by all drivers) this
1415 * requires drivers to subclass the state structure.
1416 *
1417 * Such driver-private properties should really only be implemented for
1418 * truly hardware/vendor specific state. Instead it is preferred to
1419 * standardize atomic extension and decode the properties used to expose
1420 * such an extension in the core.
1421 *
1422 * Do not call this function directly, use
1423 * drm_atomic_plane_set_property() instead.
1424 *
1425 * This callback is optional if the driver does not support any
1426 * driver-private atomic properties.
1427 *
1428 * NOTE:
1429 *
1430 * This function is called in the state assembly phase of atomic
1431 * modesets, which can be aborted for any reason (including on
1432 * userspace's request to just check whether a configuration would be
1433 * possible). Drivers MUST NOT touch any persistent state (hardware or
1434 * software) or data structures except the passed in @state parameter.
1435 *
1436 * Also since userspace controls in which order properties are set this
1437 * function must not do any input validation (since the state update is
1438 * incomplete and hence likely inconsistent). Instead any such input
1439 * validation must be done in the various atomic_check callbacks.
1440 *
1441 * RETURNS:
1442 *
1443 * 0 if the property has been found, -EINVAL if the property isn't
1444 * implemented by the driver (which shouldn't ever happen, the core only
1445 * asks for properties attached to this plane). No other validation is
1446 * allowed by the driver. The core already checks that the property
1447 * value is within the range (integer, valid enum value, ...) the driver
1448 * set when registering the property.
1449 */
815 int (*atomic_set_property)(struct drm_plane *plane, 1450 int (*atomic_set_property)(struct drm_plane *plane,
816 struct drm_plane_state *state, 1451 struct drm_plane_state *state,
817 struct drm_property *property, 1452 struct drm_property *property,
818 uint64_t val); 1453 uint64_t val);
1454
1455 /**
1456 * @atomic_get_property:
1457 *
1458 * Reads out the decoded driver-private property. This is used to
1459 * implement the GETPLANE IOCTL.
1460 *
1461 * Do not call this function directly, use
1462 * drm_atomic_plane_get_property() instead.
1463 *
1464 * This callback is optional if the driver does not support any
1465 * driver-private atomic properties.
1466 *
1467 * RETURNS:
1468 *
1469 * 0 on success, -EINVAL if the property isn't implemented by the
1470 * driver (which should never happen, the core only asks for
1471 * properties attached to this plane).
1472 */
819 int (*atomic_get_property)(struct drm_plane *plane, 1473 int (*atomic_get_property)(struct drm_plane *plane,
820 const struct drm_plane_state *state, 1474 const struct drm_plane_state *state,
821 struct drm_property *property, 1475 struct drm_property *property,
@@ -828,6 +1482,7 @@ enum drm_plane_type {
828 DRM_PLANE_TYPE_CURSOR, 1482 DRM_PLANE_TYPE_CURSOR,
829}; 1483};
830 1484
1485
831/** 1486/**
832 * struct drm_plane - central DRM plane control structure 1487 * struct drm_plane - central DRM plane control structure
833 * @dev: DRM device this plane belongs to 1488 * @dev: DRM device this plane belongs to
@@ -850,6 +1505,8 @@ struct drm_plane {
850 struct drm_device *dev; 1505 struct drm_device *dev;
851 struct list_head head; 1506 struct list_head head;
852 1507
1508 char *name;
1509
853 struct drm_modeset_lock mutex; 1510 struct drm_modeset_lock mutex;
854 1511
855 struct drm_mode_object base; 1512 struct drm_mode_object base;
@@ -870,7 +1527,7 @@ struct drm_plane {
870 1527
871 enum drm_plane_type type; 1528 enum drm_plane_type type;
872 1529
873 const void *helper_private; 1530 const struct drm_plane_helper_funcs *helper_private;
874 1531
875 struct drm_plane_state *state; 1532 struct drm_plane_state *state;
876}; 1533};
@@ -878,24 +1535,114 @@ struct drm_plane {
878/** 1535/**
879 * struct drm_bridge_funcs - drm_bridge control functions 1536 * struct drm_bridge_funcs - drm_bridge control functions
880 * @attach: Called during drm_bridge_attach 1537 * @attach: Called during drm_bridge_attach
881 * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
882 * @disable: Called right before encoder prepare, disables the bridge
883 * @post_disable: Called right after encoder prepare, for lockstepped disable
884 * @mode_set: Set this mode to the bridge
885 * @pre_enable: Called right before encoder commit, for lockstepped commit
886 * @enable: Called right after encoder commit, enables the bridge
887 */ 1538 */
888struct drm_bridge_funcs { 1539struct drm_bridge_funcs {
889 int (*attach)(struct drm_bridge *bridge); 1540 int (*attach)(struct drm_bridge *bridge);
1541
1542 /**
1543 * @mode_fixup:
1544 *
 1545 * This callback is used to validate and adjust a mode. The parameter
1546 * mode is the display mode that should be fed to the next element in
1547 * the display chain, either the final &drm_connector or the next
1548 * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
1549 * requires. It can be modified by this callback and does not need to
1550 * match mode.
1551 *
1552 * This is the only hook that allows a bridge to reject a modeset. If
1553 * this function passes all other callbacks must succeed for this
1554 * configuration.
1555 *
1556 * NOTE:
1557 *
1558 * This function is called in the check phase of atomic modesets, which
1559 * can be aborted for any reason (including on userspace's request to
1560 * just check whether a configuration would be possible). Drivers MUST
1561 * NOT touch any persistent state (hardware or software) or data
1562 * structures except the passed in @state parameter.
1563 *
1564 * RETURNS:
1565 *
1566 * True if an acceptable configuration is possible, false if the modeset
1567 * operation should be rejected.
1568 */
890 bool (*mode_fixup)(struct drm_bridge *bridge, 1569 bool (*mode_fixup)(struct drm_bridge *bridge,
891 const struct drm_display_mode *mode, 1570 const struct drm_display_mode *mode,
892 struct drm_display_mode *adjusted_mode); 1571 struct drm_display_mode *adjusted_mode);
1572 /**
1573 * @disable:
1574 *
1575 * This callback should disable the bridge. It is called right before
1576 * the preceding element in the display pipe is disabled. If the
1577 * preceding element is a bridge this means it's called before that
1578 * bridge's ->disable() function. If the preceding element is a
1579 * &drm_encoder it's called right before the encoder's ->disable(),
1580 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1581 *
1582 * The bridge can assume that the display pipe (i.e. clocks and timing
1583 * signals) feeding it is still running when this callback is called.
1584 */
893 void (*disable)(struct drm_bridge *bridge); 1585 void (*disable)(struct drm_bridge *bridge);
1586
1587 /**
1588 * @post_disable:
1589 *
1590 * This callback should disable the bridge. It is called right after
1591 * the preceding element in the display pipe is disabled. If the
1592 * preceding element is a bridge this means it's called after that
1593 * bridge's ->post_disable() function. If the preceding element is a
1594 * &drm_encoder it's called right after the encoder's ->disable(),
1595 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1596 *
1597 * The bridge must assume that the display pipe (i.e. clocks and timing
 1598 * signals) feeding it is no longer running when this callback is
1599 * called.
1600 */
894 void (*post_disable)(struct drm_bridge *bridge); 1601 void (*post_disable)(struct drm_bridge *bridge);
1602
1603 /**
1604 * @mode_set:
1605 *
1606 * This callback should set the given mode on the bridge. It is called
1607 * after the ->mode_set() callback for the preceding element in the
1608 * display pipeline has been called already. The display pipe (i.e.
1609 * clocks and timing signals) is off when this function is called.
1610 */
895 void (*mode_set)(struct drm_bridge *bridge, 1611 void (*mode_set)(struct drm_bridge *bridge,
896 struct drm_display_mode *mode, 1612 struct drm_display_mode *mode,
897 struct drm_display_mode *adjusted_mode); 1613 struct drm_display_mode *adjusted_mode);
1614 /**
1615 * @pre_enable:
1616 *
1617 * This callback should enable the bridge. It is called right before
1618 * the preceding element in the display pipe is enabled. If the
1619 * preceding element is a bridge this means it's called before that
1620 * bridge's ->pre_enable() function. If the preceding element is a
1621 * &drm_encoder it's called right before the encoder's ->enable(),
1622 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1623 *
1624 * The display pipe (i.e. clocks and timing signals) feeding this bridge
1625 * will not yet be running when this callback is called. The bridge must
1626 * not enable the display link feeding the next bridge in the chain (if
1627 * there is one) when this callback is called.
1628 */
898 void (*pre_enable)(struct drm_bridge *bridge); 1629 void (*pre_enable)(struct drm_bridge *bridge);
1630
1631 /**
1632 * @enable:
1633 *
1634 * This callback should enable the bridge. It is called right after
1635 * the preceding element in the display pipe is enabled. If the
1636 * preceding element is a bridge this means it's called after that
1637 * bridge's ->enable() function. If the preceding element is a
1638 * &drm_encoder it's called right after the encoder's ->enable(),
1639 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1640 *
1641 * The bridge can assume that the display pipe (i.e. clocks and timing
1642 * signals) feeding it is running when this callback is called. This
1643 * callback must enable the display link feeding the next bridge in the
1644 * chain if there is one.
1645 */
899 void (*enable)(struct drm_bridge *bridge); 1646 void (*enable)(struct drm_bridge *bridge);
900}; 1647};
901 1648
@@ -926,7 +1673,7 @@ struct drm_bridge {
926 * struct drm_atomic_state - the global state object for atomic updates 1673 * struct drm_atomic_state - the global state object for atomic updates
927 * @dev: parent DRM device 1674 * @dev: parent DRM device
928 * @allow_modeset: allow full modeset 1675 * @allow_modeset: allow full modeset
929 * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics 1676 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
930 * @planes: pointer to array of plane pointers 1677 * @planes: pointer to array of plane pointers
931 * @plane_states: pointer to array of plane states pointers 1678 * @plane_states: pointer to array of plane states pointers
932 * @crtcs: pointer to array of CRTC pointers 1679 * @crtcs: pointer to array of CRTC pointers
@@ -981,31 +1728,265 @@ struct drm_mode_set {
981 1728
982/** 1729/**
983 * struct drm_mode_config_funcs - basic driver provided mode setting functions 1730 * struct drm_mode_config_funcs - basic driver provided mode setting functions
984 * @fb_create: create a new framebuffer object
985 * @output_poll_changed: function to handle output configuration changes
986 * @atomic_check: check whether a given atomic state update is possible
987 * @atomic_commit: commit an atomic state update previously verified with
988 * atomic_check()
989 * @atomic_state_alloc: allocate a new atomic state
990 * @atomic_state_clear: clear the atomic state
991 * @atomic_state_free: free the atomic state
992 * 1731 *
993 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that 1732 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
994 * involve drivers. 1733 * involve drivers.
995 */ 1734 */
996struct drm_mode_config_funcs { 1735struct drm_mode_config_funcs {
1736 /**
1737 * @fb_create:
1738 *
1739 * Create a new framebuffer object. The core does basic checks on the
1740 * requested metadata, but most of that is left to the driver. See
1741 * struct &drm_mode_fb_cmd2 for details.
1742 *
1743 * If the parameters are deemed valid and the backing storage objects in
1744 * the underlying memory manager all exist, then the driver allocates
1745 * a new &drm_framebuffer structure, subclassed to contain
1746 * driver-specific information (like the internal native buffer object
1747 * references). It also needs to fill out all relevant metadata, which
1748 * should be done by calling drm_helper_mode_fill_fb_struct().
1749 *
1750 * The initialization is finalized by calling drm_framebuffer_init(),
1751 * which registers the framebuffer and makes it accessible to other
1752 * threads.
1753 *
1754 * RETURNS:
1755 *
1756 * A new framebuffer with an initial reference count of 1 or a negative
1757 * error code encoded with ERR_PTR().
1758 */
997 struct drm_framebuffer *(*fb_create)(struct drm_device *dev, 1759 struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
998 struct drm_file *file_priv, 1760 struct drm_file *file_priv,
999 const struct drm_mode_fb_cmd2 *mode_cmd); 1761 const struct drm_mode_fb_cmd2 *mode_cmd);
1762
1763 /**
1764 * @output_poll_changed:
1765 *
1766 * Callback used by helpers to inform the driver of output configuration
1767 * changes.
1768 *
1769 * Drivers implementing fbdev emulation with the helpers can call
 1770 * drm_fb_helper_hotplug_event from this hook to inform the fbdev
1771 * helper of output changes.
1772 *
1773 * FIXME:
1774 *
1775 * Except that there's no vtable for device-level helper callbacks
1776 * there's no reason this is a core function.
1777 */
1000 void (*output_poll_changed)(struct drm_device *dev); 1778 void (*output_poll_changed)(struct drm_device *dev);
1001 1779
1780 /**
1781 * @atomic_check:
1782 *
1783 * This is the only hook to validate an atomic modeset update. This
1784 * function must reject any modeset and state changes which the hardware
1785 * or driver doesn't support. This includes but is of course not limited
1786 * to:
1787 *
1788 * - Checking that the modes, framebuffers, scaling and placement
1789 * requirements and so on are within the limits of the hardware.
1790 *
1791 * - Checking that any hidden shared resources are not oversubscribed.
1792 * This can be shared PLLs, shared lanes, overall memory bandwidth,
1793 * display fifo space (where shared between planes or maybe even
1794 * CRTCs).
1795 *
1796 * - Checking that virtualized resources exported to userspace are not
1797 * oversubscribed. For various reasons it can make sense to expose
1798 * more planes, crtcs or encoders than which are physically there. One
1799 * example is dual-pipe operations (which generally should be hidden
 1800 * from userspace when lockstepped in hardware, exposed otherwise),
1801 * where a plane might need 1 hardware plane (if it's just on one
1802 * pipe), 2 hardware planes (when it spans both pipes) or maybe even
1803 * shared a hardware plane with a 2nd plane (if there's a compatible
1804 * plane requested on the area handled by the other pipe).
1805 *
1806 * - Check that any transitional state is possible and that if
1807 * requested, the update can indeed be done in the vblank period
1808 * without temporarily disabling some functions.
1809 *
1810 * - Check any other constraints the driver or hardware might have.
1811 *
1812 * - This callback also needs to correctly fill out the &drm_crtc_state
1813 * in this update to make sure that drm_atomic_crtc_needs_modeset()
1814 * reflects the nature of the possible update and returns true if and
1815 * only if the update cannot be applied without tearing within one
1816 * vblank on that CRTC. The core uses that information to reject
1817 * updates which require a full modeset (i.e. blanking the screen, or
1818 * at least pausing updates for a substantial amount of time) if
1819 * userspace has disallowed that in its request.
1820 *
1821 * - The driver also does not need to repeat basic input validation
1822 * like done for the corresponding legacy entry points. The core does
1823 * that before calling this hook.
1824 *
1825 * See the documentation of @atomic_commit for an exhaustive list of
1826 * error conditions which don't have to be checked at the
 1827 * ->atomic_check() stage.
1828 *
1829 * See the documentation for struct &drm_atomic_state for how exactly
1830 * an atomic modeset update is described.
1831 *
1832 * Drivers using the atomic helpers can implement this hook using
1833 * drm_atomic_helper_check(), or one of the exported sub-functions of
1834 * it.
1835 *
1836 * RETURNS:
1837 *
1838 * 0 on success or one of the below negative error codes:
1839 *
1840 * - -EINVAL, if any of the above constraints are violated.
1841 *
1842 * - -EDEADLK, when returned from an attempt to acquire an additional
1843 * &drm_modeset_lock through drm_modeset_lock().
1844 *
1845 * - -ENOMEM, if allocating additional state sub-structures failed due
1846 * to lack of memory.
1847 *
1848 * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
1849 * This can either be due to a pending signal, or because the driver
1850 * needs to completely bail out to recover from an exceptional
1851 * situation like a GPU hang. From a userspace point all errors are
1852 * treated equally.
1853 */
1002 int (*atomic_check)(struct drm_device *dev, 1854 int (*atomic_check)(struct drm_device *dev,
1003 struct drm_atomic_state *a); 1855 struct drm_atomic_state *state);
1856
1857 /**
1858 * @atomic_commit:
1859 *
1860 * This is the only hook to commit an atomic modeset update. The core
1861 * guarantees that @atomic_check has been called successfully before
1862 * calling this function, and that nothing has been changed in the
1863 * interim.
1864 *
1865 * See the documentation for struct &drm_atomic_state for how exactly
1866 * an atomic modeset update is described.
1867 *
1868 * Drivers using the atomic helpers can implement this hook using
1869 * drm_atomic_helper_commit(), or one of the exported sub-functions of
1870 * it.
1871 *
1872 * Asynchronous commits (as indicated with the async parameter) must
1873 * do any preparatory work which might result in an unsuccessful commit
1874 * in the context of this callback. The only exceptions are hardware
1875 * errors resulting in -EIO. But even in that case the driver must
1876 * ensure that the display pipe is at least running, to avoid
1877 * compositors crashing when pageflips don't work. Anything else,
1878 * specifically committing the update to the hardware, should be done
1879 * without blocking the caller. For updates which do not require a
1880 * modeset this must be guaranteed.
1881 *
1882 * The driver must wait for any pending rendering to the new
1883 * framebuffers to complete before executing the flip. It should also
1884 * wait for any pending rendering from other drivers if the underlying
1885 * buffer is a shared dma-buf. Asynchronous commits must not wait for
1886 * rendering in the context of this callback.
1887 *
1888 * An application can request to be notified when the atomic commit has
1889 * completed. These events are per-CRTC and can be distinguished by the
1890 * CRTC index supplied in &drm_event to userspace.
1891 *
1892 * The drm core will supply a struct &drm_event in the event
1893 * member of each CRTC's &drm_crtc_state structure. This can be handled by the
1894 * drm_crtc_send_vblank_event() function, which the driver should call on
1895 * the provided event upon completion of the atomic commit. Note that if
1896 * the driver supports vblank signalling and timestamping the vblank
1897 * counters and timestamps must agree with the ones returned from page
1898 * flip events. With the current vblank helper infrastructure this can
1899 * be achieved by holding a vblank reference while the page flip is
1900 * pending, acquired through drm_crtc_vblank_get() and released with
1901 * drm_crtc_vblank_put(). Drivers are free to implement their own vblank
1902 * counter and timestamp tracking though, e.g. if they have accurate
1903 * timestamp registers in hardware.
1904 *
1905 * NOTE:
1906 *
1907 * Drivers are not allowed to shut down any display pipe successfully
1908 * enabled through an atomic commit on their own. Doing so can result in
1909 * compositors crashing if a page flip is suddenly rejected because the
1910 * pipe is off.
1911 *
1912 * RETURNS:
1913 *
1914 * 0 on success or one of the below negative error codes:
1915 *
 1916 * - -EBUSY, if an asynchronous update is requested and there is
 1917 * an earlier update pending. Drivers are allowed to support a queue
1918 * of outstanding updates, but currently no driver supports that.
1919 * Note that drivers must wait for preceding updates to complete if a
1920 * synchronous update is requested, they are not allowed to fail the
1921 * commit in that case.
1922 *
1923 * - -ENOMEM, if the driver failed to allocate memory. Specifically
1924 * this can happen when trying to pin framebuffers, which must only
1925 * be done when committing the state.
1926 *
1927 * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
1928 * that the driver has run out of vram, iommu space or similar GPU
1929 * address space needed for framebuffer.
1930 *
1931 * - -EIO, if the hardware completely died.
1932 *
1933 * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
1934 * This can either be due to a pending signal, or because the driver
1935 * needs to completely bail out to recover from an exceptional
1936 * situation like a GPU hang. From a userspace point of view all errors are
1937 * treated equally.
1938 *
1939 * This list is exhaustive. Specifically this hook is not allowed to
1940 * return -EINVAL (any invalid requests should be caught in
1941 * @atomic_check) or -EDEADLK (this function must not acquire
1942 * additional modeset locks).
1943 */
1004 int (*atomic_commit)(struct drm_device *dev, 1944 int (*atomic_commit)(struct drm_device *dev,
1005 struct drm_atomic_state *a, 1945 struct drm_atomic_state *state,
1006 bool async); 1946 bool async);
1947
1948 /**
1949 * @atomic_state_alloc:
1950 *
1951 * This optional hook can be used by drivers that want to subclass struct
1952 * &drm_atomic_state to be able to track their own driver-private global
1953 * state easily. If this hook is implemented, drivers must also
1954 * implement @atomic_state_clear and @atomic_state_free.
1955 *
1956 * RETURNS:
1957 *
1958 * A new &drm_atomic_state on success or NULL on failure.
1959 */
1007 struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); 1960 struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
1961
1962 /**
1963 * @atomic_state_clear:
1964 *
1965 * This hook must clear any driver private state duplicated into the
1966 * passed-in &drm_atomic_state. This hook is called when the caller
1967 * encountered a &drm_modeset_lock deadlock and needs to drop all
1968 * already acquired locks as part of the deadlock avoidance dance
1969 * implemented in drm_modeset_lock_backoff().
1970 *
1971 * Any duplicated state must be invalidated since a concurrent atomic
1972 * update might change it, and the drm atomic interfaces always apply
1973 * updates as relative changes to the current state.
1974 *
1975 * Drivers that implement this must call drm_atomic_state_default_clear()
1976 * to clear common state.
1977 */
1008 void (*atomic_state_clear)(struct drm_atomic_state *state); 1978 void (*atomic_state_clear)(struct drm_atomic_state *state);
1979
1980 /**
1981 * @atomic_state_free:
1982 *
 1983 * This hook needs to free driver private resources and the &drm_atomic_state
 1984 * itself. Note that the core first calls drm_atomic_state_clear() to
 1985 * avoid code duplication between the clear and free hooks.
1986 *
1987 * Drivers that implement this must call drm_atomic_state_default_free()
1988 * to release common resources.
1989 */
1009 void (*atomic_state_free)(struct drm_atomic_state *state); 1990 void (*atomic_state_free)(struct drm_atomic_state *state);
1010}; 1991};
1011 1992
@@ -1014,7 +1995,7 @@ struct drm_mode_config_funcs {
1014 * @mutex: mutex protecting KMS related lists and structures 1995 * @mutex: mutex protecting KMS related lists and structures
1015 * @connection_mutex: ww mutex protecting connector state and routing 1996 * @connection_mutex: ww mutex protecting connector state and routing
1016 * @acquire_ctx: global implicit acquire context used by atomic drivers for 1997 * @acquire_ctx: global implicit acquire context used by atomic drivers for
1017 * legacy ioctls 1998 * legacy IOCTLs
1018 * @idr_mutex: mutex for KMS ID allocation and management 1999 * @idr_mutex: mutex for KMS ID allocation and management
1019 * @crtc_idr: main KMS ID tracking object 2000 * @crtc_idr: main KMS ID tracking object
1020 * @fb_lock: mutex to protect fb state and lists 2001 * @fb_lock: mutex to protect fb state and lists
@@ -1187,11 +2168,13 @@ struct drm_prop_enum_list {
1187 char *name; 2168 char *name;
1188}; 2169};
1189 2170
1190extern int drm_crtc_init_with_planes(struct drm_device *dev, 2171extern __printf(6, 7)
1191 struct drm_crtc *crtc, 2172int drm_crtc_init_with_planes(struct drm_device *dev,
1192 struct drm_plane *primary, 2173 struct drm_crtc *crtc,
1193 struct drm_plane *cursor, 2174 struct drm_plane *primary,
1194 const struct drm_crtc_funcs *funcs); 2175 struct drm_plane *cursor,
2176 const struct drm_crtc_funcs *funcs,
2177 const char *name, ...);
1195extern void drm_crtc_cleanup(struct drm_crtc *crtc); 2178extern void drm_crtc_cleanup(struct drm_crtc *crtc);
1196extern unsigned int drm_crtc_index(struct drm_crtc *crtc); 2179extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
1197 2180
@@ -1237,10 +2220,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
1237void drm_bridge_pre_enable(struct drm_bridge *bridge); 2220void drm_bridge_pre_enable(struct drm_bridge *bridge);
1238void drm_bridge_enable(struct drm_bridge *bridge); 2221void drm_bridge_enable(struct drm_bridge *bridge);
1239 2222
1240extern int drm_encoder_init(struct drm_device *dev, 2223extern __printf(5, 6)
1241 struct drm_encoder *encoder, 2224int drm_encoder_init(struct drm_device *dev,
1242 const struct drm_encoder_funcs *funcs, 2225 struct drm_encoder *encoder,
1243 int encoder_type); 2226 const struct drm_encoder_funcs *funcs,
2227 int encoder_type, const char *name, ...);
1244 2228
1245/** 2229/**
1246 * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 2230 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -1255,13 +2239,15 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
1255 return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); 2239 return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
1256} 2240}
1257 2241
1258extern int drm_universal_plane_init(struct drm_device *dev, 2242extern __printf(8, 9)
1259 struct drm_plane *plane, 2243int drm_universal_plane_init(struct drm_device *dev,
1260 unsigned long possible_crtcs, 2244 struct drm_plane *plane,
1261 const struct drm_plane_funcs *funcs, 2245 unsigned long possible_crtcs,
1262 const uint32_t *formats, 2246 const struct drm_plane_funcs *funcs,
1263 unsigned int format_count, 2247 const uint32_t *formats,
1264 enum drm_plane_type type); 2248 unsigned int format_count,
2249 enum drm_plane_type type,
2250 const char *name, ...);
1265extern int drm_plane_init(struct drm_device *dev, 2251extern int drm_plane_init(struct drm_device *dev,
1266 struct drm_plane *plane, 2252 struct drm_plane *plane,
1267 unsigned long possible_crtcs, 2253 unsigned long possible_crtcs,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index e22ab29d2d00..4b37afa2b73b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -40,148 +40,7 @@
40#include <linux/fb.h> 40#include <linux/fb.h>
41 41
42#include <drm/drm_crtc.h> 42#include <drm/drm_crtc.h>
43 43#include <drm/drm_modeset_helper_vtables.h>
44enum mode_set_atomic {
45 LEAVE_ATOMIC_MODE_SET,
46 ENTER_ATOMIC_MODE_SET,
47};
48
49/**
50 * struct drm_crtc_helper_funcs - helper operations for CRTCs
51 * @dpms: set power state
52 * @prepare: prepare the CRTC, called before @mode_set
53 * @commit: commit changes to CRTC, called after @mode_set
54 * @mode_fixup: try to fixup proposed mode for this CRTC
55 * @mode_set: set this mode
56 * @mode_set_nofb: set mode only (no scanout buffer attached)
57 * @mode_set_base: update the scanout buffer
58 * @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
59 * @load_lut: load color palette
60 * @disable: disable CRTC when no longer in use
61 * @enable: enable CRTC
62 * @atomic_check: check for validity of an atomic state
63 * @atomic_begin: begin atomic update
64 * @atomic_flush: flush atomic update
65 *
66 * The helper operations are called by the mid-layer CRTC helper.
67 *
68 * Note that with atomic helpers @dpms, @prepare and @commit hooks are
69 * deprecated. Used @enable and @disable instead exclusively.
70 *
71 * With legacy crtc helpers there's a big semantic difference between @disable
72 * and the other hooks: @disable also needs to release any resources acquired in
73 * @mode_set (like shared PLLs).
74 */
75struct drm_crtc_helper_funcs {
76 /*
77 * Control power levels on the CRTC. If the mode passed in is
78 * unsupported, the provider must use the next lowest power level.
79 */
80 void (*dpms)(struct drm_crtc *crtc, int mode);
81 void (*prepare)(struct drm_crtc *crtc);
82 void (*commit)(struct drm_crtc *crtc);
83
84 /* Provider can fixup or change mode timings before modeset occurs */
85 bool (*mode_fixup)(struct drm_crtc *crtc,
86 const struct drm_display_mode *mode,
87 struct drm_display_mode *adjusted_mode);
88 /* Actually set the mode */
89 int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
90 struct drm_display_mode *adjusted_mode, int x, int y,
91 struct drm_framebuffer *old_fb);
92 /* Actually set the mode for atomic helpers, optional */
93 void (*mode_set_nofb)(struct drm_crtc *crtc);
94
95 /* Move the crtc on the current fb to the given position *optional* */
96 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
97 struct drm_framebuffer *old_fb);
98 int (*mode_set_base_atomic)(struct drm_crtc *crtc,
99 struct drm_framebuffer *fb, int x, int y,
100 enum mode_set_atomic);
101
102 /* reload the current crtc LUT */
103 void (*load_lut)(struct drm_crtc *crtc);
104
105 void (*disable)(struct drm_crtc *crtc);
106 void (*enable)(struct drm_crtc *crtc);
107
108 /* atomic helpers */
109 int (*atomic_check)(struct drm_crtc *crtc,
110 struct drm_crtc_state *state);
111 void (*atomic_begin)(struct drm_crtc *crtc,
112 struct drm_crtc_state *old_crtc_state);
113 void (*atomic_flush)(struct drm_crtc *crtc,
114 struct drm_crtc_state *old_crtc_state);
115};
116
117/**
118 * struct drm_encoder_helper_funcs - helper operations for encoders
119 * @dpms: set power state
120 * @save: save connector state
121 * @restore: restore connector state
122 * @mode_fixup: try to fixup proposed mode for this connector
123 * @prepare: part of the disable sequence, called before the CRTC modeset
124 * @commit: called after the CRTC modeset
125 * @mode_set: set this mode, optional for atomic helpers
126 * @get_crtc: return CRTC that the encoder is currently attached to
127 * @detect: connection status detection
128 * @disable: disable encoder when not in use (overrides DPMS off)
129 * @enable: enable encoder
130 * @atomic_check: check for validity of an atomic update
131 *
132 * The helper operations are called by the mid-layer CRTC helper.
133 *
134 * Note that with atomic helpers @dpms, @prepare and @commit hooks are
135 * deprecated. Used @enable and @disable instead exclusively.
136 *
137 * With legacy crtc helpers there's a big semantic difference between @disable
138 * and the other hooks: @disable also needs to release any resources acquired in
139 * @mode_set (like shared PLLs).
140 */
141struct drm_encoder_helper_funcs {
142 void (*dpms)(struct drm_encoder *encoder, int mode);
143 void (*save)(struct drm_encoder *encoder);
144 void (*restore)(struct drm_encoder *encoder);
145
146 bool (*mode_fixup)(struct drm_encoder *encoder,
147 const struct drm_display_mode *mode,
148 struct drm_display_mode *adjusted_mode);
149 void (*prepare)(struct drm_encoder *encoder);
150 void (*commit)(struct drm_encoder *encoder);
151 void (*mode_set)(struct drm_encoder *encoder,
152 struct drm_display_mode *mode,
153 struct drm_display_mode *adjusted_mode);
154 struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
155 /* detect for DAC style encoders */
156 enum drm_connector_status (*detect)(struct drm_encoder *encoder,
157 struct drm_connector *connector);
158 void (*disable)(struct drm_encoder *encoder);
159
160 void (*enable)(struct drm_encoder *encoder);
161
162 /* atomic helpers */
163 int (*atomic_check)(struct drm_encoder *encoder,
164 struct drm_crtc_state *crtc_state,
165 struct drm_connector_state *conn_state);
166};
167
168/**
169 * struct drm_connector_helper_funcs - helper operations for connectors
170 * @get_modes: get mode list for this connector
171 * @mode_valid: is this mode valid on the given connector? (optional)
172 * @best_encoder: return the preferred encoder for this connector
173 * @atomic_best_encoder: atomic version of @best_encoder
174 *
175 * The helper operations are called by the mid-layer CRTC helper.
176 */
177struct drm_connector_helper_funcs {
178 int (*get_modes)(struct drm_connector *connector);
179 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
180 struct drm_display_mode *mode);
181 struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
182 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
183 struct drm_connector_state *connector_state);
184};
185 44
186extern void drm_helper_disable_unused_functions(struct drm_device *dev); 45extern void drm_helper_disable_unused_functions(struct drm_device *dev);
187extern int drm_crtc_helper_set_config(struct drm_mode_set *set); 46extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
@@ -199,24 +58,6 @@ extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
199extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, 58extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
200 const struct drm_mode_fb_cmd2 *mode_cmd); 59 const struct drm_mode_fb_cmd2 *mode_cmd);
201 60
202static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
203 const struct drm_crtc_helper_funcs *funcs)
204{
205 crtc->helper_private = funcs;
206}
207
208static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
209 const struct drm_encoder_helper_funcs *funcs)
210{
211 encoder->helper_private = funcs;
212}
213
214static inline void drm_connector_helper_add(struct drm_connector *connector,
215 const struct drm_connector_helper_funcs *funcs)
216{
217 connector->helper_private = funcs;
218}
219
220extern void drm_helper_resume_force_mode(struct drm_device *dev); 61extern void drm_helper_resume_force_mode(struct drm_device *dev);
221 62
222int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, 63int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -229,10 +70,6 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
229extern int drm_helper_probe_single_connector_modes(struct drm_connector 70extern int drm_helper_probe_single_connector_modes(struct drm_connector
230 *connector, uint32_t maxX, 71 *connector, uint32_t maxX,
231 uint32_t maxY); 72 uint32_t maxY);
232extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector
233 *connector,
234 uint32_t maxX,
235 uint32_t maxY);
236extern void drm_kms_helper_poll_init(struct drm_device *dev); 73extern void drm_kms_helper_poll_init(struct drm_device *dev);
237extern void drm_kms_helper_poll_fini(struct drm_device *dev); 74extern void drm_kms_helper_poll_fini(struct drm_device *dev);
238extern bool drm_helper_hpd_irq_event(struct drm_device *dev); 75extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 74b5888bbc73..24ab1787b771 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -421,7 +421,7 @@ struct drm_dp_payload {
421struct drm_dp_mst_topology_mgr { 421struct drm_dp_mst_topology_mgr {
422 422
423 struct device *dev; 423 struct device *dev;
424 struct drm_dp_mst_topology_cbs *cbs; 424 const struct drm_dp_mst_topology_cbs *cbs;
425 int max_dpcd_transaction_bytes; 425 int max_dpcd_transaction_bytes;
426 struct drm_dp_aux *aux; /* auxch for this topology mgr to use */ 426 struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
427 int max_payloads; 427 int max_payloads;
@@ -451,9 +451,7 @@ struct drm_dp_mst_topology_mgr {
451 the mstb tx_slots and txmsg->state once they are queued */ 451 the mstb tx_slots and txmsg->state once they are queued */
452 struct mutex qlock; 452 struct mutex qlock;
453 struct list_head tx_msg_downq; 453 struct list_head tx_msg_downq;
454 struct list_head tx_msg_upq;
455 bool tx_down_in_progress; 454 bool tx_down_in_progress;
456 bool tx_up_in_progress;
457 455
458 /* payload info + lock for it */ 456 /* payload info + lock for it */
459 struct mutex payload_lock; 457 struct mutex payload_lock;
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index 8b9cc3671858..82cdf611393d 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -95,7 +95,7 @@ struct drm_encoder_slave_funcs {
95struct drm_encoder_slave { 95struct drm_encoder_slave {
96 struct drm_encoder base; 96 struct drm_encoder base;
97 97
98 struct drm_encoder_slave_funcs *slave_funcs; 98 const struct drm_encoder_slave_funcs *slave_funcs;
99 void *slave_priv; 99 void *slave_priv;
100 void *bus_priv; 100 void *bus_priv;
101}; 101};
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 87b090c4b730..d8a40dff0d1d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,6 +34,11 @@ struct drm_fb_helper;
34 34
35#include <linux/kgdb.h> 35#include <linux/kgdb.h>
36 36
37enum mode_set_atomic {
38 LEAVE_ATOMIC_MODE_SET,
39 ENTER_ATOMIC_MODE_SET,
40};
41
37struct drm_fb_offset { 42struct drm_fb_offset {
38 int x, y; 43 int x, y;
39}; 44};
@@ -74,25 +79,76 @@ struct drm_fb_helper_surface_size {
74 79
75/** 80/**
76 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library 81 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
77 * @gamma_set: Set the given gamma lut register on the given crtc.
78 * @gamma_get: Read the given gamma lut register on the given crtc, used to
79 * save the current lut when force-restoring the fbdev for e.g.
80 * kdbg.
81 * @fb_probe: Driver callback to allocate and initialize the fbdev info
82 * structure. Furthermore it also needs to allocate the drm
83 * framebuffer used to back the fbdev.
84 * @initial_config: Setup an initial fbdev display configuration
85 * 82 *
86 * Driver callbacks used by the fbdev emulation helper library. 83 * Driver callbacks used by the fbdev emulation helper library.
87 */ 84 */
88struct drm_fb_helper_funcs { 85struct drm_fb_helper_funcs {
86 /**
87 * @gamma_set:
88 *
89 * Set the given gamma LUT register on the given CRTC.
90 *
91 * This callback is optional.
92 *
93 * FIXME:
94 *
95 * This callback is functionally redundant with the core gamma table
96 * support and simply exists because the fbdev hasn't yet been
97 * refactored to use the core gamma table interfaces.
98 */
89 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, 99 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
90 u16 blue, int regno); 100 u16 blue, int regno);
101 /**
102 * @gamma_get:
103 *
104 * Read the given gamma LUT register on the given CRTC, used to save the
105 * current LUT when force-restoring the fbdev for e.g. kdbg.
106 *
107 * This callback is optional.
108 *
109 * FIXME:
110 *
111 * This callback is functionally redundant with the core gamma table
112 * support and simply exists because the fbdev hasn't yet been
113 * refactored to use the core gamma table interfaces.
114 */
91 void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, 115 void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
92 u16 *blue, int regno); 116 u16 *blue, int regno);
93 117
118 /**
119 * @fb_probe:
120 *
121 * Driver callback to allocate and initialize the fbdev info structure.
122 * Furthermore it also needs to allocate the DRM framebuffer used to
123 * back the fbdev.
124 *
125 * This callback is mandatory.
126 *
127 * RETURNS:
128 *
129 * The driver should return 0 on success and a negative error code on
130 * failure.
131 */
94 int (*fb_probe)(struct drm_fb_helper *helper, 132 int (*fb_probe)(struct drm_fb_helper *helper,
95 struct drm_fb_helper_surface_size *sizes); 133 struct drm_fb_helper_surface_size *sizes);
134
135 /**
136 * @initial_config:
137 *
138 * Driver callback to setup an initial fbdev display configuration.
139 * Drivers can use this callback to tell the fbdev emulation what the
140 * preferred initial configuration is. This is useful to implement
141 * smooth booting where the fbdev (and subsequently all userspace) never
142 * changes the mode, but always inherits the existing configuration.
143 *
144 * This callback is optional.
145 *
146 * RETURNS:
147 *
148 * The driver should return true if a suitable initial configuration has
149 * been filled out and false when the fbdev helper should fall back to
150 * the default probing logic.
151 */
96 bool (*initial_config)(struct drm_fb_helper *fb_helper, 152 bool (*initial_config)(struct drm_fb_helper *fb_helper,
97 struct drm_fb_helper_crtc **crtcs, 153 struct drm_fb_helper_crtc **crtcs,
98 struct drm_display_mode **modes, 154 struct drm_display_mode **modes,
@@ -105,18 +161,22 @@ struct drm_fb_helper_connector {
105}; 161};
106 162
107/** 163/**
108 * struct drm_fb_helper - helper to emulate fbdev on top of kms 164 * struct drm_fb_helper - main structure to emulate fbdev on top of KMS
109 * @fb: Scanout framebuffer object 165 * @fb: Scanout framebuffer object
110 * @dev: DRM device 166 * @dev: DRM device
111 * @crtc_count: number of possible CRTCs 167 * @crtc_count: number of possible CRTCs
112 * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) 168 * @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
113 * @connector_count: number of connected connectors 169 * @connector_count: number of connected connectors
114 * @connector_info_alloc_count: size of connector_info 170 * @connector_info_alloc_count: size of connector_info
171 * @connector_info: array of per-connector information
115 * @funcs: driver callbacks for fb helper 172 * @funcs: driver callbacks for fb helper
116 * @fbdev: emulated fbdev device info struct 173 * @fbdev: emulated fbdev device info struct
117 * @pseudo_palette: fake palette of 16 colors 174 * @pseudo_palette: fake palette of 16 colors
118 * @kernel_fb_list: list_head in kernel_fb_helper_list 175 *
119 * @delayed_hotplug: was there a hotplug while kms master active? 176 * This is the main structure used by the fbdev helpers. Drivers supporting
177 * fbdev emulation should embedded this into their overall driver structure.
178 * Drivers must also fill out a struct &drm_fb_helper_funcs with a few
179 * operations.
120 */ 180 */
121struct drm_fb_helper { 181struct drm_fb_helper {
122 struct drm_framebuffer *fb; 182 struct drm_framebuffer *fb;
@@ -129,10 +189,21 @@ struct drm_fb_helper {
129 const struct drm_fb_helper_funcs *funcs; 189 const struct drm_fb_helper_funcs *funcs;
130 struct fb_info *fbdev; 190 struct fb_info *fbdev;
131 u32 pseudo_palette[17]; 191 u32 pseudo_palette[17];
192
193 /**
194 * @kernel_fb_list:
195 *
196 * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit.
197 */
132 struct list_head kernel_fb_list; 198 struct list_head kernel_fb_list;
133 199
134 /* we got a hotplug but fbdev wasn't running the console 200 /**
135 delay until next set_par */ 201 * @delayed_hotplug:
202 *
203 * A hotplug was received while fbdev wasn't in control of the DRM
204 * device, i.e. another KMS master was active. The output configuration
205 * needs to be reprobe when fbdev is in control again.
206 */
136 bool delayed_hotplug; 207 bool delayed_hotplug;
137 208
138 /** 209 /**
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index f1d8d0dbb4f1..1b3b1f8c8cdf 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -163,9 +163,36 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
163 return container_of(dev, struct mipi_dsi_device, dev); 163 return container_of(dev, struct mipi_dsi_device, dev);
164} 164}
165 165
166/**
167 * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
168 * given pixel format defined by the MIPI DSI
169 * specification
170 * @fmt: MIPI DSI pixel format
171 *
172 * Returns: The number of bits per pixel of the given pixel format.
173 */
174static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt)
175{
176 switch (fmt) {
177 case MIPI_DSI_FMT_RGB888:
178 case MIPI_DSI_FMT_RGB666:
179 return 24;
180
181 case MIPI_DSI_FMT_RGB666_PACKED:
182 return 18;
183
184 case MIPI_DSI_FMT_RGB565:
185 return 16;
186 }
187
188 return -EINVAL;
189}
190
166struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); 191struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
167int mipi_dsi_attach(struct mipi_dsi_device *dsi); 192int mipi_dsi_attach(struct mipi_dsi_device *dsi);
168int mipi_dsi_detach(struct mipi_dsi_device *dsi); 193int mipi_dsi_detach(struct mipi_dsi_device *dsi);
194int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
195int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
169int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, 196int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
170 u16 value); 197 u16 value);
171 198
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index f9115aee43f4..625966a906f2 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -35,46 +35,91 @@
35 * structures). 35 * structures).
36 */ 36 */
37 37
38/**
39 * enum drm_mode_status - hardware support status of a mode
40 * @MODE_OK: Mode OK
41 * @MODE_HSYNC: hsync out of range
42 * @MODE_VSYNC: vsync out of range
43 * @MODE_H_ILLEGAL: mode has illegal horizontal timings
44 * @MODE_V_ILLEGAL: mode has illegal horizontal timings
45 * @MODE_BAD_WIDTH: requires an unsupported linepitch
46 * @MODE_NOMODE: no mode with a matching name
47 * @MODE_NO_INTERLACE: interlaced mode not supported
48 * @MODE_NO_DBLESCAN: doublescan mode not supported
49 * @MODE_NO_VSCAN: multiscan mode not supported
50 * @MODE_MEM: insufficient video memory
51 * @MODE_VIRTUAL_X: mode width too large for specified virtual size
52 * @MODE_VIRTUAL_Y: mode height too large for specified virtual size
53 * @MODE_MEM_VIRT: insufficient video memory given virtual size
54 * @MODE_NOCLOCK: no fixed clock available
55 * @MODE_CLOCK_HIGH: clock required is too high
56 * @MODE_CLOCK_LOW: clock required is too low
57 * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange
58 * @MODE_BAD_HVALUE: horizontal timing was out of range
59 * @MODE_BAD_VVALUE: vertical timing was out of range
60 * @MODE_BAD_VSCAN: VScan value out of range
61 * @MODE_HSYNC_NARROW: horizontal sync too narrow
62 * @MODE_HSYNC_WIDE: horizontal sync too wide
63 * @MODE_HBLANK_NARROW: horizontal blanking too narrow
64 * @MODE_HBLANK_WIDE: horizontal blanking too wide
65 * @MODE_VSYNC_NARROW: vertical sync too narrow
66 * @MODE_VSYNC_WIDE: vertical sync too wide
67 * @MODE_VBLANK_NARROW: vertical blanking too narrow
68 * @MODE_VBLANK_WIDE: vertical blanking too wide
69 * @MODE_PANEL: exceeds panel dimensions
70 * @MODE_INTERLACE_WIDTH: width too large for interlaced mode
71 * @MODE_ONE_WIDTH: only one width is supported
72 * @MODE_ONE_HEIGHT: only one height is supported
73 * @MODE_ONE_SIZE: only one resolution is supported
74 * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking
75 * @MODE_NO_STEREO: stereo modes not supported
76 * @MODE_STALE: mode has become stale
77 * @MODE_BAD: unspecified reason
78 * @MODE_ERROR: error condition
79 *
80 * This enum is used to filter out modes not supported by the driver/hardware
81 * combination.
82 */
38enum drm_mode_status { 83enum drm_mode_status {
39 MODE_OK = 0, /* Mode OK */ 84 MODE_OK = 0,
40 MODE_HSYNC, /* hsync out of range */ 85 MODE_HSYNC,
41 MODE_VSYNC, /* vsync out of range */ 86 MODE_VSYNC,
42 MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ 87 MODE_H_ILLEGAL,
43 MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ 88 MODE_V_ILLEGAL,
44 MODE_BAD_WIDTH, /* requires an unsupported linepitch */ 89 MODE_BAD_WIDTH,
45 MODE_NOMODE, /* no mode with a matching name */ 90 MODE_NOMODE,
46 MODE_NO_INTERLACE, /* interlaced mode not supported */ 91 MODE_NO_INTERLACE,
47 MODE_NO_DBLESCAN, /* doublescan mode not supported */ 92 MODE_NO_DBLESCAN,
48 MODE_NO_VSCAN, /* multiscan mode not supported */ 93 MODE_NO_VSCAN,
49 MODE_MEM, /* insufficient video memory */ 94 MODE_MEM,
50 MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ 95 MODE_VIRTUAL_X,
51 MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ 96 MODE_VIRTUAL_Y,
52 MODE_MEM_VIRT, /* insufficient video memory given virtual size */ 97 MODE_MEM_VIRT,
53 MODE_NOCLOCK, /* no fixed clock available */ 98 MODE_NOCLOCK,
54 MODE_CLOCK_HIGH, /* clock required is too high */ 99 MODE_CLOCK_HIGH,
55 MODE_CLOCK_LOW, /* clock required is too low */ 100 MODE_CLOCK_LOW,
56 MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ 101 MODE_CLOCK_RANGE,
57 MODE_BAD_HVALUE, /* horizontal timing was out of range */ 102 MODE_BAD_HVALUE,
58 MODE_BAD_VVALUE, /* vertical timing was out of range */ 103 MODE_BAD_VVALUE,
59 MODE_BAD_VSCAN, /* VScan value out of range */ 104 MODE_BAD_VSCAN,
60 MODE_HSYNC_NARROW, /* horizontal sync too narrow */ 105 MODE_HSYNC_NARROW,
61 MODE_HSYNC_WIDE, /* horizontal sync too wide */ 106 MODE_HSYNC_WIDE,
62 MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ 107 MODE_HBLANK_NARROW,
63 MODE_HBLANK_WIDE, /* horizontal blanking too wide */ 108 MODE_HBLANK_WIDE,
64 MODE_VSYNC_NARROW, /* vertical sync too narrow */ 109 MODE_VSYNC_NARROW,
65 MODE_VSYNC_WIDE, /* vertical sync too wide */ 110 MODE_VSYNC_WIDE,
66 MODE_VBLANK_NARROW, /* vertical blanking too narrow */ 111 MODE_VBLANK_NARROW,
67 MODE_VBLANK_WIDE, /* vertical blanking too wide */ 112 MODE_VBLANK_WIDE,
68 MODE_PANEL, /* exceeds panel dimensions */ 113 MODE_PANEL,
69 MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ 114 MODE_INTERLACE_WIDTH,
70 MODE_ONE_WIDTH, /* only one width is supported */ 115 MODE_ONE_WIDTH,
71 MODE_ONE_HEIGHT, /* only one height is supported */ 116 MODE_ONE_HEIGHT,
72 MODE_ONE_SIZE, /* only one resolution is supported */ 117 MODE_ONE_SIZE,
73 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ 118 MODE_NO_REDUCED,
74 MODE_NO_STEREO, /* stereo modes not supported */ 119 MODE_NO_STEREO,
75 MODE_UNVERIFIED = -3, /* mode needs to reverified */ 120 MODE_STALE = -3,
76 MODE_BAD = -2, /* unspecified reason */ 121 MODE_BAD = -2,
77 MODE_ERROR = -1 /* error condition */ 122 MODE_ERROR = -1
78}; 123};
79 124
80#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ 125#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
@@ -96,17 +141,125 @@ enum drm_mode_status {
96 141
97#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF 142#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
98 143
144/**
145 * struct drm_display_mode - DRM kernel-internal display mode structure
146 * @hdisplay: horizontal display size
147 * @hsync_start: horizontal sync start
148 * @hsync_end: horizontal sync end
149 * @htotal: horizontal total size
150 * @hskew: horizontal skew?!
151 * @vdisplay: vertical display size
152 * @vsync_start: vertical sync start
153 * @vsync_end: vertical sync end
154 * @vtotal: vertical total size
155 * @vscan: vertical scan?!
156 * @crtc_hdisplay: hardware mode horizontal display size
157 * @crtc_hblank_start: hardware mode horizontal blank start
158 * @crtc_hblank_end: hardware mode horizontal blank end
159 * @crtc_hsync_start: hardware mode horizontal sync start
160 * @crtc_hsync_end: hardware mode horizontal sync end
161 * @crtc_htotal: hardware mode horizontal total size
162 * @crtc_hskew: hardware mode horizontal skew?!
163 * @crtc_vdisplay: hardware mode vertical display size
164 * @crtc_vblank_start: hardware mode vertical blank start
165 * @crtc_vblank_end: hardware mode vertical blank end
166 * @crtc_vsync_start: hardware mode vertical sync start
167 * @crtc_vsync_end: hardware mode vertical sync end
168 * @crtc_vtotal: hardware mode vertical total size
169 *
170 * The horizontal and vertical timings are defined per the following diagram.
171 *
172 *
173 * Active Front Sync Back
174 * Region Porch Porch
175 * <-----------------------><----------------><-------------><-------------->
176 * //////////////////////|
177 * ////////////////////// |
178 * ////////////////////// |.................. ................
179 * _______________
180 * <----- [hv]display ----->
181 * <------------- [hv]sync_start ------------>
182 * <--------------------- [hv]sync_end --------------------->
183 * <-------------------------------- [hv]total ----------------------------->*
184 *
185 * This structure contains two copies of timings. First are the plain timings,
186 * which specify the logical mode, as it would be for a progressive 1:1 scanout
187 * at the refresh rate userspace can observe through vblank timestamps. Then
188 * there's the hardware timings, which are corrected for interlacing,
189 * double-clocking and similar things. They are provided as a convenience, and
190 * can be appropriately computed using drm_mode_set_crtcinfo().
191 */
99struct drm_display_mode { 192struct drm_display_mode {
100 /* Header */ 193 /**
194 * @head:
195 *
196 * struct list_head for mode lists.
197 */
101 struct list_head head; 198 struct list_head head;
199
200 /**
201 * @base:
202 *
203 * A display mode is a normal modeset object, possibly including public
204 * userspace id.
205 *
206 * FIXME:
207 *
208 * This can probably be removed since the entire concept of userspace
209 * managing modes explicitly has never landed in upstream kernel mode
210 * setting support.
211 */
102 struct drm_mode_object base; 212 struct drm_mode_object base;
103 213
214 /**
215 * @name:
216 *
217 * Human-readable name of the mode, filled out with drm_mode_set_name().
218 */
104 char name[DRM_DISPLAY_MODE_LEN]; 219 char name[DRM_DISPLAY_MODE_LEN];
105 220
221 /**
222 * @status:
223 *
224 * Status of the mode, used to filter out modes not supported by the
225 * hardware. See enum &drm_mode_status.
226 */
106 enum drm_mode_status status; 227 enum drm_mode_status status;
228
229 /**
230 * @type:
231 *
232 * A bitmask of flags, mostly about the source of a mode. Possible flags
233 * are:
234 *
235 * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively
236 * unused.
237 * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
238 * resolution of an LCD panel. There should only be one preferred
239 * mode per connector at any given time.
240 * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
241 * them really. Drivers must set this bit for all modes they create
242 * and expose to userspace.
243 *
244 * Plus a big list of flags which shouldn't be used at all, but are
245 * still around since these flags are also used in the userspace ABI:
246 *
247 * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
248 * DRM_MODE_TYPE_PREFERRED instead.
249 * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
250 * which are stuck around for hysterical raisins only. No one has an
251 * idea what they were meant for. Don't use.
252 * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige
253 * from older kms designs where userspace had to first add a custom
254 * mode to the kernel's mode list before it could use it. Don't use.
255 */
107 unsigned int type; 256 unsigned int type;
108 257
109 /* Proposed mode values */ 258 /**
259 * @clock:
260 *
261 * Pixel clock in kHz.
262 */
110 int clock; /* in kHz */ 263 int clock; /* in kHz */
111 int hdisplay; 264 int hdisplay;
112 int hsync_start; 265 int hsync_start;
@@ -118,14 +271,74 @@ struct drm_display_mode {
118 int vsync_end; 271 int vsync_end;
119 int vtotal; 272 int vtotal;
120 int vscan; 273 int vscan;
274 /**
275 * @flags:
276 *
277 * Sync and timing flags:
278 *
279 * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high.
280 * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low.
281 * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high.
282 * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low.
283 * - DRM_MODE_FLAG_INTERLACE: mode is interlaced.
284 * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan.
285 * - DRM_MODE_FLAG_CSYNC: mode uses composite sync.
286 * - DRM_MODE_FLAG_PCSYNC: composite sync is active high.
287 * - DRM_MODE_FLAG_NCSYNC: composite sync is active low.
288 * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?).
289 * - DRM_MODE_FLAG_BCAST: not used?
290 * - DRM_MODE_FLAG_PIXMUX: not used?
291 * - DRM_MODE_FLAG_DBLCLK: double-clocked mode.
292 * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode.
293 *
294 * Additionally there's flags to specify how 3D modes are packed:
295 *
296 * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode.
297 * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right.
298 * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields.
299 * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines.
300 * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames.
301 * - DRM_MODE_FLAG_3D_L_DEPTH: ?
302 * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ?
303 * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom
304 * parts.
305 * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and
306 * right parts.
307 */
121 unsigned int flags; 308 unsigned int flags;
122 309
123 /* Addressable image size (may be 0 for projectors, etc.) */ 310 /**
311 * @width_mm:
312 *
313 * Addressable size of the output in mm, projectors should set this to
314 * 0.
315 */
124 int width_mm; 316 int width_mm;
317
318 /**
319 * @height_mm:
320 *
321 * Addressable size of the output in mm, projectors should set this to
322 * 0.
323 */
125 int height_mm; 324 int height_mm;
126 325
127 /* Actual mode we give to hw */ 326 /**
128 int crtc_clock; /* in KHz */ 327 * @crtc_clock:
328 *
329 * Actual pixel or dot clock in the hardware. This differs from the
330 * logical @clock when e.g. using interlacing, double-clocking, stereo
331 * modes or other fancy stuff that changes the timings and signals
332 * actually sent over the wire.
333 *
334 * This is again in kHz.
335 *
336 * Note that with digital outputs like HDMI or DP there's usually a
337 * massive confusion between the dot clock and the signal clock at the
338 * bit encoding level. Especially when a 8b/10b encoding is used and the
339 * difference is exactly a factor of 10.
340 */
341 int crtc_clock;
129 int crtc_hdisplay; 342 int crtc_hdisplay;
130 int crtc_hblank_start; 343 int crtc_hblank_start;
131 int crtc_hblank_end; 344 int crtc_hblank_end;
@@ -140,12 +353,48 @@ struct drm_display_mode {
140 int crtc_vsync_end; 353 int crtc_vsync_end;
141 int crtc_vtotal; 354 int crtc_vtotal;
142 355
143 /* Driver private mode info */ 356 /**
357 * @private:
358 *
359 * Pointer for driver private data. This can only be used for mode
360 * objects passed to drivers in modeset operations. It shouldn't be used
361 * by atomic drivers since they can store any additional data by
362 * subclassing state structures.
363 */
144 int *private; 364 int *private;
365
366 /**
367 * @private_flags:
368 *
369 * Similar to @private, but just an integer.
370 */
145 int private_flags; 371 int private_flags;
146 372
147 int vrefresh; /* in Hz */ 373 /**
148 int hsync; /* in kHz */ 374 * @vrefresh:
375 *
376 * Vertical refresh rate, for debug output in human readable form. Not
377 * used in a functional way.
378 *
379 * This value is in Hz.
380 */
381 int vrefresh;
382
383 /**
384 * @hsync:
385 *
386 * Horizontal refresh rate, for debug output in human readable form. Not
387 * used in a functional way.
388 *
389 * This value is in kHz.
390 */
391 int hsync;
392
393 /**
394 * @picture_aspect_ratio:
395 *
396 * Field for setting the HDMI picture aspect ratio of a mode.
397 */
149 enum hdmi_picture_aspect picture_aspect_ratio; 398 enum hdmi_picture_aspect picture_aspect_ratio;
150}; 399};
151 400
@@ -234,7 +483,7 @@ enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
234void drm_mode_prune_invalid(struct drm_device *dev, 483void drm_mode_prune_invalid(struct drm_device *dev,
235 struct list_head *mode_list, bool verbose); 484 struct list_head *mode_list, bool verbose);
236void drm_mode_sort(struct list_head *mode_list); 485void drm_mode_sort(struct list_head *mode_list);
237void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits); 486void drm_mode_connector_list_update(struct drm_connector *connector);
238 487
239/* parsing cmdline modes */ 488/* parsing cmdline modes */
240bool 489bool
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
new file mode 100644
index 000000000000..a126a0d7aed4
--- /dev/null
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -0,0 +1,928 @@
1/*
2 * Copyright © 2006 Keith Packard
3 * Copyright © 2007-2008 Dave Airlie
4 * Copyright © 2007-2008 Intel Corporation
5 * Jesse Barnes <jesse.barnes@intel.com>
6 * Copyright © 2011-2013 Intel Corporation
7 * Copyright © 2015 Intel Corporation
8 * Daniel Vetter <daniel.vetter@ffwll.ch>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the "Software"),
12 * to deal in the Software without restriction, including without limitation
13 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14 * and/or sell copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef __DRM_MODESET_HELPER_VTABLES_H__
30#define __DRM_MODESET_HELPER_VTABLES_H__
31
32#include <drm/drm_crtc.h>
33
34/**
35 * DOC: overview
36 *
37 * The DRM mode setting helper functions are common code for drivers to use if
38 * they wish. Drivers are not forced to use this code in their
39 * implementations but it would be useful if the code they do use at least
40 * provides a consistent interface and operation to userspace. Therefore it is
41 * highly recommended to use the provided helpers as much as possible.
42 *
43 * Because there is only one pointer per modeset object to hold a vfunc table
44 * for helper libraries they are by necessity shared among the different
45 * helpers.
46 *
47 * To make this clear all the helper vtables are pulled together in this location here.
48 */
49
50enum mode_set_atomic;
51
52/**
53 * struct drm_crtc_helper_funcs - helper operations for CRTCs
54 *
55 * These hooks are used by the legacy CRTC helpers, the transitional plane
56 * helpers and the new atomic modesetting helpers.
57 */
58struct drm_crtc_helper_funcs {
59 /**
60 * @dpms:
61 *
62 * Callback to control power levels on the CRTC. If the mode passed in
63 * is unsupported, the provider must use the next lowest power level.
64 * This is used by the legacy CRTC helpers to implement DPMS
65 * functionality in drm_helper_connector_dpms().
66 *
67 * This callback is also used to disable a CRTC by calling it with
68 * DRM_MODE_DPMS_OFF if the @disable hook isn't used.
69 *
70 * This callback is used by the legacy CRTC helpers. Atomic helpers
71 * also support using this hook for enabling and disabling a CRTC to
72 * facilitate transitions to atomic, but it is deprecated. Instead
73 * @enable and @disable should be used.
74 */
75 void (*dpms)(struct drm_crtc *crtc, int mode);
76
77 /**
78 * @prepare:
79 *
80 * This callback should prepare the CRTC for a subsequent modeset, which
81 * in practice means the driver should disable the CRTC if it is
82 * running. Most drivers ended up implementing this by calling their
83 * @dpms hook with DRM_MODE_DPMS_OFF.
84 *
85 * This callback is used by the legacy CRTC helpers. Atomic helpers
86 * also support using this hook for disabling a CRTC to facilitate
87 * transitions to atomic, but it is deprecated. Instead @disable should
88 * be used.
89 */
90 void (*prepare)(struct drm_crtc *crtc);
91
92 /**
93 * @commit:
94 *
95 * This callback should commit the new mode on the CRTC after a modeset,
96 * which in practice means the driver should enable the CRTC. Most
97 * drivers ended up implementing this by calling their @dpms hook with
98 * DRM_MODE_DPMS_ON.
99 *
100 * This callback is used by the legacy CRTC helpers. Atomic helpers
101 * also support using this hook for enabling a CRTC to facilitate
102 * transitions to atomic, but it is deprecated. Instead @enable should
103 * be used.
104 */
105 void (*commit)(struct drm_crtc *crtc);
106
107 /**
108 * @mode_fixup:
109 *
110 * This callback is used to validate a mode. The parameter mode is the
111 * display mode that userspace requested, adjusted_mode is the mode the
112 * encoders need to be fed with. Note that this is the inverse semantics
113 * of the meaning for the &drm_encoder and &drm_bridge
114 * ->mode_fixup() functions. If the CRTC cannot support the requested
115 * conversion from mode to adjusted_mode it should reject the modeset.
116 *
117 * This function is used by both legacy CRTC helpers and atomic helpers.
118 * With atomic helpers it is optional.
119 *
120 * NOTE:
121 *
122 * This function is called in the check phase of atomic modesets, which
123 * can be aborted for any reason (including on userspace's request to
124 * just check whether a configuration would be possible). Atomic drivers
125 * MUST NOT touch any persistent state (hardware or software) or data
126 * structures except the passed in adjusted_mode parameter.
127 *
128 * This is in contrast to the legacy CRTC helpers where this was
129 * allowed.
130 *
131 * Atomic drivers which need to inspect and adjust more state should
132 * instead use the @atomic_check callback.
133 *
134 * Also beware that neither core nor helpers filter modes before
135 * passing them to the driver: While the list of modes that is
136 * advertised to userspace is filtered using the connector's
137 * ->mode_valid() callback, neither the core nor the helpers do any
138 * filtering on modes passed in from userspace when setting a mode. It
139 * is therefore possible for userspace to pass in a mode that was
140 * previously filtered out using ->mode_valid() or add a custom mode
141 * that wasn't probed from EDID or similar to begin with. Even though
142 * this is an advanced feature and rarely used nowadays, some users rely
143 * on being able to specify modes manually so drivers must be prepared
144 * to deal with it. Specifically this means that all drivers need not
145 * only validate modes in ->mode_valid() but also in ->mode_fixup() to
146 * make sure invalid modes passed in from userspace are rejected.
147 *
148 * RETURNS:
149 *
150 * True if an acceptable configuration is possible, false if the modeset
151 * operation should be rejected.
152 */
153 bool (*mode_fixup)(struct drm_crtc *crtc,
154 const struct drm_display_mode *mode,
155 struct drm_display_mode *adjusted_mode);
156
157 /**
158 * @mode_set:
159 *
160 * This callback is used by the legacy CRTC helpers to set a new mode,
161 * position and framebuffer. Since it ties the primary plane to every
162 * mode change it is incompatible with universal plane support. And
163 * since it can't update other planes it's incompatible with atomic
164 * modeset support.
165 *
166 * This callback is only used by CRTC helpers and deprecated.
167 *
168 * RETURNS:
169 *
170 * 0 on success or a negative error code on failure.
171 */
172 int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
173 struct drm_display_mode *adjusted_mode, int x, int y,
174 struct drm_framebuffer *old_fb);
175
176 /**
177 * @mode_set_nofb:
178 *
179 * This callback is used to update the display mode of a CRTC without
180 * changing anything of the primary plane configuration. This fits the
181 * requirement of atomic and hence is used by the atomic helpers. It is
182 * also used by the transitional plane helpers to implement a
183 * @mode_set hook in drm_helper_crtc_mode_set().
184 *
185 * Note that the display pipe is completely off when this function is
186 * called. Atomic drivers which need hardware to be running before they
187 * program the new display mode (e.g. because they implement runtime PM)
188 * should not use this hook. This is because the helper library calls
189 * this hook only once per mode change and not every time the display
190 * pipeline is suspended using either DPMS or the new "ACTIVE" property.
191 * Which means register values set in this callback might get reset when
192 * the CRTC is suspended, but not restored. Such drivers should instead
193 * move all their CRTC setup into the @enable callback.
194 *
195 * This callback is optional.
196 */
197 void (*mode_set_nofb)(struct drm_crtc *crtc);
198
199 /**
200 * @mode_set_base:
201 *
202 * This callback is used by the legacy CRTC helpers to set a new
203 * framebuffer and scanout position. It is optional and used as an
204 * optimized fast-path instead of a full mode set operation with all the
205 * resulting flickering. If it is not present
206 * drm_crtc_helper_set_config() will fall back to a full modeset, using
207 * the ->mode_set() callback. Since it can't update other planes it's
208 * incompatible with atomic modeset support.
209 *
210 * This callback is only used by the CRTC helpers and deprecated.
211 *
212 * RETURNS:
213 *
214 * 0 on success or a negative error code on failure.
215 */
216 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
217 struct drm_framebuffer *old_fb);
218
219 /**
220 * @mode_set_base_atomic:
221 *
222 * This callback is used by the fbdev helpers to set a new framebuffer
223 * and scanout without sleeping, i.e. from an atomic calling context. It
224 * is only used to implement kgdb support.
225 *
226 * This callback is optional and only needed for kgdb support in the fbdev
227 * helpers.
228 *
229 * RETURNS:
230 *
231 * 0 on success or a negative error code on failure.
232 */
233 int (*mode_set_base_atomic)(struct drm_crtc *crtc,
234 struct drm_framebuffer *fb, int x, int y,
235 enum mode_set_atomic);
236
237 /**
238 * @load_lut:
239 *
240 * Load a LUT prepared with the @gamma_set functions from
241 * &drm_fb_helper_funcs.
242 *
243 * This callback is optional and is only used by the fbdev emulation
244 * helpers.
245 *
246 * FIXME:
247 *
248 * This callback is functionally redundant with the core gamma table
249 * support and simply exists because the fbdev hasn't yet been
250 * refactored to use the core gamma table interfaces.
251 */
252 void (*load_lut)(struct drm_crtc *crtc);
253
254 /**
255 * @disable:
256 *
257 * This callback should be used to disable the CRTC. With the atomic
258 * drivers it is called after all encoders connected to this CRTC have
259 * been shut off already using their own ->disable hook. If that
260 * sequence is too simple drivers can just add their own hooks and call
261 * it from this CRTC callback here by looping over all encoders
262 * connected to it using for_each_encoder_on_crtc().
263 *
264 * This hook is used both by legacy CRTC helpers and atomic helpers.
265 * Atomic drivers don't need to implement it if there's no need to
266 * disable anything at the CRTC level. To ensure that runtime PM
267 * handling (using either DPMS or the new "ACTIVE" property) works
268 * @disable must be the inverse of @enable for atomic drivers.
269 *
270 * NOTE:
271 *
272 * With legacy CRTC helpers there's a big semantic difference between
273 * @disable and other hooks (like @prepare or @dpms) used to shut down a
274 * CRTC: @disable is only called when also logically disabling the
275 * display pipeline and needs to release any resources acquired in
276 * @mode_set (like shared PLLs, or again release pinned framebuffers).
277 *
278 * Therefore @disable must be the inverse of @mode_set plus @commit for
279 * drivers still using legacy CRTC helpers, which is different from the
280 * rules under atomic.
281 */
282 void (*disable)(struct drm_crtc *crtc);
283
284 /**
285 * @enable:
286 *
287 * This callback should be used to enable the CRTC. With the atomic
288 * drivers it is called before all encoders connected to this CRTC are
289 * enabled through the encoder's own ->enable hook. If that sequence is
290 * too simple drivers can just add their own hooks and call it from this
291 * CRTC callback here by looping over all encoders connected to it using
292 * for_each_encoder_on_crtc().
293 *
294 * This hook is used only by atomic helpers, for symmetry with @disable.
295 * Atomic drivers don't need to implement it if there's no need to
296 * enable anything at the CRTC level. To ensure that runtime PM handling
297 * (using either DPMS or the new "ACTIVE" property) works
298 * @enable must be the inverse of @disable for atomic drivers.
299 */
300 void (*enable)(struct drm_crtc *crtc);
301
302 /**
303 * @atomic_check:
304 *
305 * Drivers should check plane-update related CRTC constraints in this
306 * hook. They can also check mode related limitations but need to be
307 * aware of the calling order, since this hook is used by
308 * drm_atomic_helper_check_planes() whereas the preparations needed to
309 * check output routing and the display mode is done in
310 * drm_atomic_helper_check_modeset(). Therefore drivers that want to
311 * check output routing and display mode constraints in this callback
312 * must ensure that drm_atomic_helper_check_modeset() has been called
313 * beforehand. This is calling order used by the default helper
314 * implementation in drm_atomic_helper_check().
315 *
316 * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check()
317 * hooks are called after the ones for planes, which allows drivers to
318 * assign shared resources requested by planes in the CRTC callback
319 * here. For more complicated dependencies the driver can call the provided
320 * check helpers multiple times until the computed state has a final
321 * configuration and everything has been checked.
322 *
323 * This function is also allowed to inspect any other object's state and
324 * can add more state objects to the atomic commit if needed. Care must
325 * be taken though to ensure that state check&compute functions for
326 * these added states are all called, and derived state in other objects
327 * all updated. Again the recommendation is to just call check helpers
328 * until a maximal configuration is reached.
329 *
330 * This callback is used by the atomic modeset helpers and by the
331 * transitional plane helpers, but it is optional.
332 *
333 * NOTE:
334 *
335 * This function is called in the check phase of an atomic update. The
336 * driver is not allowed to change anything outside of the free-standing
337 * state objects passed-in or assembled in the overall &drm_atomic_state
338 * update tracking structure.
339 *
340 * RETURNS:
341 *
342 * 0 on success, -EINVAL if the state or the transition can't be
343 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
344 * attempt to obtain another state object ran into a &drm_modeset_lock
345 * deadlock.
346 */
347 int (*atomic_check)(struct drm_crtc *crtc,
348 struct drm_crtc_state *state);
349
350 /**
351 * @atomic_begin:
352 *
353 * Drivers should prepare for an atomic update of multiple planes on
354 * a CRTC in this hook. Depending upon hardware this might be vblank
355 * evasion, blocking updates by setting bits or doing preparatory work
356 * for e.g. manual update display.
357 *
358 * This hook is called before any plane commit functions are called.
359 *
360 * Note that the power state of the display pipe when this function is
361 * called depends upon the exact helpers and calling sequence the driver
362 * has picked. See drm_atomic_commit_planes() for a discussion of the
363 * tradeoffs and variants of plane commit helpers.
364 *
365 * This callback is used by the atomic modeset helpers and by the
366 * transitional plane helpers, but it is optional.
367 */
368 void (*atomic_begin)(struct drm_crtc *crtc,
369 struct drm_crtc_state *old_crtc_state);
370 /**
371 * @atomic_flush:
372 *
373 * Drivers should finalize an atomic update of multiple planes on
374 * a CRTC in this hook. Depending upon hardware this might include
375 * checking that vblank evasion was successful, unblocking updates by
376 * setting bits or setting the GO bit to flush out all updates.
377 *
378 * Simple hardware or hardware with special requirements can commit and
379 * flush out all updates for all planes from this hook and forgo all the
380 * other commit hooks for plane updates.
381 *
382 * This hook is called after any plane commit functions are called.
383 *
384 * Note that the power state of the display pipe when this function is
385 * called depends upon the exact helpers and calling sequence the driver
386 * has picked. See drm_atomic_commit_planes() for a discussion of the
387 * tradeoffs and variants of plane commit helpers.
388 *
389 * This callback is used by the atomic modeset helpers and by the
390 * transitional plane helpers, but it is optional.
391 */
392 void (*atomic_flush)(struct drm_crtc *crtc,
393 struct drm_crtc_state *old_crtc_state);
394};
395
396/**
397 * drm_crtc_helper_add - sets the helper vtable for a crtc
398 * @crtc: DRM CRTC
399 * @funcs: helper vtable to set for @crtc
400 */
401static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
402 const struct drm_crtc_helper_funcs *funcs)
403{
404 crtc->helper_private = funcs;
405}
406
407/**
408 * struct drm_encoder_helper_funcs - helper operations for encoders
409 *
410 * These hooks are used by the legacy CRTC helpers, the transitional plane
411 * helpers and the new atomic modesetting helpers.
412 */
413struct drm_encoder_helper_funcs {
414 /**
415 * @dpms:
416 *
417 * Callback to control power levels on the encoder. If the mode passed in
418 * is unsupported, the provider must use the next lowest power level.
419 * This is used by the legacy encoder helpers to implement DPMS
420 * functionality in drm_helper_connector_dpms().
421 *
422 * This callback is also used to disable an encoder by calling it with
423 * DRM_MODE_DPMS_OFF if the @disable hook isn't used.
424 *
425 * This callback is used by the legacy CRTC helpers. Atomic helpers
426 * also support using this hook for enabling and disabling an encoder to
427 * facilitate transitions to atomic, but it is deprecated. Instead
428 * @enable and @disable should be used.
429 */
430 void (*dpms)(struct drm_encoder *encoder, int mode);
431
432 /**
433 * @mode_fixup:
434 *
435 * This callback is used to validate and adjust a mode. The parameter
436 * mode is the display mode that should be fed to the next element in
437 * the display chain, either the final &drm_connector or a &drm_bridge.
438 * The parameter adjusted_mode is the input mode the encoder requires. It
439 * can be modified by this callback and does not need to match mode.
440 *
441 * This function is used by both legacy CRTC helpers and atomic helpers.
442 * With atomic helpers it is optional.
443 *
444 * NOTE:
445 *
446 * This function is called in the check phase of atomic modesets, which
447 * can be aborted for any reason (including on userspace's request to
448 * just check whether a configuration would be possible). Atomic drivers
449 * MUST NOT touch any persistent state (hardware or software) or data
450 * structures except the passed in adjusted_mode parameter.
451 *
452 * This is in contrast to the legacy CRTC helpers where this was
453 * allowed.
454 *
455 * Atomic drivers which need to inspect and adjust more state should
456 * instead use the @atomic_check callback.
457 *
458 * Also beware that neither core nor helpers filter modes before
459 * passing them to the driver: While the list of modes that is
460 * advertised to userspace is filtered using the connector's
461 * ->mode_valid() callback, neither the core nor the helpers do any
462 * filtering on modes passed in from userspace when setting a mode. It
463 * is therefore possible for userspace to pass in a mode that was
464 * previously filtered out using ->mode_valid() or add a custom mode
465 * that wasn't probed from EDID or similar to begin with. Even though
466 * this is an advanced feature and rarely used nowadays, some users rely
467 * on being able to specify modes manually so drivers must be prepared
468 * to deal with it. Specifically this means that all drivers need not
469 * only validate modes in ->mode_valid() but also in ->mode_fixup() to
470 * make sure invalid modes passed in from userspace are rejected.
471 *
472 * RETURNS:
473 *
474 * True if an acceptable configuration is possible, false if the modeset
475 * operation should be rejected.
476 */
477 bool (*mode_fixup)(struct drm_encoder *encoder,
478 const struct drm_display_mode *mode,
479 struct drm_display_mode *adjusted_mode);
480
481 /**
482 * @prepare:
483 *
484 * This callback should prepare the encoder for a subsequent modeset,
485 * which in practice means the driver should disable the encoder if it
486 * is running. Most drivers ended up implementing this by calling their
487 * @dpms hook with DRM_MODE_DPMS_OFF.
488 *
489 * This callback is used by the legacy CRTC helpers. Atomic helpers
490 * also support using this hook for disabling an encoder to facilitate
491 * transitions to atomic, but it is deprecated. Instead @disable should
492 * be used.
493 */
494 void (*prepare)(struct drm_encoder *encoder);
495
496 /**
497 * @commit:
498 *
499 * This callback should commit the new mode on the encoder after a modeset,
500 * which in practice means the driver should enable the encoder. Most
501 * drivers ended up implementing this by calling their @dpms hook with
502 * DRM_MODE_DPMS_ON.
503 *
504 * This callback is used by the legacy CRTC helpers. Atomic helpers
505 * also support using this hook for enabling an encoder to facilitate
506 * transitions to atomic, but it is deprecated. Instead @enable should
507 * be used.
508 */
509 void (*commit)(struct drm_encoder *encoder);
510
511 /**
512 * @mode_set:
513 *
514 * This callback is used to update the display mode of an encoder.
515 *
516 * Note that the display pipe is completely off when this function is
517 * called. Drivers which need hardware to be running before they program
518 * the new display mode (because they implement runtime PM) should not
519 * use this hook, because the helper library calls it only once and not
520 * every time the display pipeline is suspend using either DPMS or the
521 * new "ACTIVE" property. Such drivers should instead move all their
522 * encoder setup into the ->enable() callback.
523 *
524 * This callback is used both by the legacy CRTC helpers and the atomic
525 * modeset helpers. It is optional in the atomic helpers.
526 */
527 void (*mode_set)(struct drm_encoder *encoder,
528 struct drm_display_mode *mode,
529 struct drm_display_mode *adjusted_mode);
530
531 /**
532 * @get_crtc:
533 *
534 * This callback is used by the legacy CRTC helpers to work around
535 * deficiencies in its own book-keeping.
536 *
537 * Do not use, use atomic helpers instead, which get the book keeping
538 * right.
539 *
540 * FIXME:
541 *
542 * Currently only nouveau is using this, and as soon as nouveau is
543 * atomic we can ditch this hook.
544 */
545 struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
546
547 /**
548 * @detect:
549 *
550 * This callback can be used by drivers who want to do detection on the
551 * encoder object instead of in connector functions.
552 *
553 * It is not used by any helper and therefore has purely driver-specific
554 * semantics. New drivers shouldn't use this and instead just implement
555 * their own private callbacks.
556 *
557 * FIXME:
558 *
559 * This should just be converted into a pile of driver vfuncs.
560 * Currently radeon, amdgpu and nouveau are using it.
561 */
562 enum drm_connector_status (*detect)(struct drm_encoder *encoder,
563 struct drm_connector *connector);
564
565 /**
566 * @disable:
567 *
568 * This callback should be used to disable the encoder. With the atomic
569 * drivers it is called before this encoder's CRTC has been shut off
570 * using the CRTC's own ->disable hook. If that sequence is too simple
571 * drivers can just add their own driver private encoder hooks and call
572 * them from CRTC's callback by looping over all encoders connected to
573 * it using for_each_encoder_on_crtc().
574 *
575 * This hook is used both by legacy CRTC helpers and atomic helpers.
576 * Atomic drivers don't need to implement it if there's no need to
577 * disable anything at the encoder level. To ensure that runtime PM
578 * handling (using either DPMS or the new "ACTIVE" property) works
579 * @disable must be the inverse of @enable for atomic drivers.
580 *
581 * NOTE:
582 *
583 * With legacy CRTC helpers there's a big semantic difference between
584 * @disable and other hooks (like @prepare or @dpms) used to shut down a
585 * encoder: @disable is only called when also logically disabling the
586 * display pipeline and needs to release any resources acquired in
587 * @mode_set (like shared PLLs, or again release pinned framebuffers).
588 *
589 * Therefore @disable must be the inverse of @mode_set plus @commit for
590 * drivers still using legacy CRTC helpers, which is different from the
591 * rules under atomic.
592 */
593 void (*disable)(struct drm_encoder *encoder);
594
595 /**
596 * @enable:
597 *
598 * This callback should be used to enable the encoder. With the atomic
599 * drivers it is called after this encoder's CRTC has been enabled using
600 * the CRTC's own ->enable hook. If that sequence is too simple drivers
601 * can just add their own driver private encoder hooks and call them
602 * from CRTC's callback by looping over all encoders connected to it
603 * using for_each_encoder_on_crtc().
604 *
605 * This hook is used only by atomic helpers, for symmetry with @disable.
606 * Atomic drivers don't need to implement it if there's no need to
607 * enable anything at the encoder level. To ensure that runtime PM handling
608 * (using either DPMS or the new "ACTIVE" property) works
609 * @enable must be the inverse of @disable for atomic drivers.
610 */
611 void (*enable)(struct drm_encoder *encoder);
612
613 /**
614 * @atomic_check:
615 *
616 * This callback is used to validate encoder state for atomic drivers.
617 * Since the encoder is the object connecting the CRTC and connector it
618 * gets passed both states, to be able to validate interactions and
619 * update the CRTC to match what the encoder needs for the requested
620 * connector.
621 *
622 * This function is used by the atomic helpers, but it is optional.
623 *
624 * NOTE:
625 *
626 * This function is called in the check phase of an atomic update. The
627 * driver is not allowed to change anything outside of the free-standing
628 * state objects passed-in or assembled in the overall &drm_atomic_state
629 * update tracking structure.
630 *
631 * RETURNS:
632 *
633 * 0 on success, -EINVAL if the state or the transition can't be
634 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
635 * attempt to obtain another state object ran into a &drm_modeset_lock
636 * deadlock.
637 */
638 int (*atomic_check)(struct drm_encoder *encoder,
639 struct drm_crtc_state *crtc_state,
640 struct drm_connector_state *conn_state);
641};
642
643/**
644 * drm_encoder_helper_add - sets the helper vtable for an encoder
645 * @encoder: DRM encoder
646 * @funcs: helper vtable to set for @encoder
647 */
648static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
649 const struct drm_encoder_helper_funcs *funcs)
650{
651 encoder->helper_private = funcs;
652}
653
654/**
655 * struct drm_connector_helper_funcs - helper operations for connectors
656 *
657 * These functions are used by the atomic and legacy modeset helpers and by the
658 * probe helpers.
659 */
660struct drm_connector_helper_funcs {
661 /**
662 * @get_modes:
663 *
664 * This function should fill in all modes currently valid for the sink
665 * into the connector->probed_modes list. It should also update the
666 * EDID property by calling drm_mode_connector_update_edid_property().
667 *
668 * The usual way to implement this is to cache the EDID retrieved in the
669 * probe callback somewhere in the driver-private connector structure.
670 * In this function drivers then parse the modes in the EDID and add
670 * them by calling drm_add_edid_modes(). But connectors that drive a
672 * fixed panel can also manually add specific modes using
673 * drm_mode_probed_add(). Drivers which manually add modes should also
674 * make sure that the @display_info, @width_mm and @height_mm fields of the
675 * struct &drm_connector are filled in.
676 *
677 * Virtual drivers that just want some standard VESA mode with a given
678 * resolution can call drm_add_modes_noedid(), and mark the preferred
679 * one using drm_set_preferred_mode().
680 *
681 * Finally drivers that support audio probably want to update the ELD
682 * data, too, using drm_edid_to_eld().
683 *
684 * This function is only called after the ->detect() hook has indicated
685 * that a sink is connected and when the EDID isn't overridden through
686 * sysfs or the kernel commandline.
687 *
688 * This callback is used by the probe helpers in e.g.
689 * drm_helper_probe_single_connector_modes().
690 *
691 * RETURNS:
692 *
693 * The number of modes added by calling drm_mode_probed_add().
694 */
695 int (*get_modes)(struct drm_connector *connector);
696
697 /**
698 * @mode_valid:
699 *
700 * Callback to validate a mode for a connector, irrespective of the
701 * specific display configuration.
702 *
703 * This callback is used by the probe helpers to filter the mode list
704 * (which is usually derived from the EDID data block from the sink).
705 * See e.g. drm_helper_probe_single_connector_modes().
706 *
707 * NOTE:
708 *
709 * This only filters the mode list supplied to userspace in the
710 * GETCONNECTOR IOCTL. Userspace is free to create modes of its own and
711 * ask the kernel to use them. In this case the atomic helpers or legacy
712 * CRTC helpers will not call this function. Drivers therefore must
713 * still fully validate any mode passed in in a modeset request.
714 *
715 * RETURNS:
716 *
717 * Either MODE_OK or one of the failure reasons in enum
718 * &drm_mode_status.
719 */
720 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
721 struct drm_display_mode *mode);
722 /**
723 * @best_encoder:
724 *
725 * This function should select the best encoder for the given connector.
726 *
727 * This function is used by both the atomic helpers (in the
728 * drm_atomic_helper_check_modeset() function) and in the legacy CRTC
729 * helpers.
730 *
731 * NOTE:
732 *
733 * In atomic drivers this function is called in the check phase of an
734 * atomic update. The driver is not allowed to change or inspect
735 * anything outside of arguments passed-in. Atomic drivers which need to
736 * inspect dynamic configuration state should instead use
737 * @atomic_best_encoder.
738 *
739 * RETURNS:
740 *
741 * Encoder that should be used for the given connector and connector
742 * state, or NULL if no suitable encoder exists. Note that the helpers
743 * will ensure that encoders aren't used twice, drivers should not check
744 * for this.
745 */
746 struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
747
748 /**
749 * @atomic_best_encoder:
750 *
751 * This is the atomic version of @best_encoder for atomic drivers which
752 * need to select the best encoder depending upon the desired
753 * configuration and can't select it statically.
754 *
755 * This function is used by drm_atomic_helper_check_modeset() and either
756 * this or @best_encoder is required.
757 *
758 * NOTE:
759 *
760 * This function is called in the check phase of an atomic update. The
761 * driver is not allowed to change anything outside of the free-standing
762 * state objects passed-in or assembled in the overall &drm_atomic_state
763 * update tracking structure.
764 *
765 * RETURNS:
766 *
767 * Encoder that should be used for the given connector and connector
768 * state, or NULL if no suitable encoder exists. Note that the helpers
769 * will ensure that encoders aren't used twice, drivers should not check
770 * for this.
771 */
772 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
773 struct drm_connector_state *connector_state);
774};
775
776/**
777 * drm_connector_helper_add - sets the helper vtable for a connector
778 * @connector: DRM connector
779 * @funcs: helper vtable to set for @connector
780 */
781static inline void drm_connector_helper_add(struct drm_connector *connector,
782 const struct drm_connector_helper_funcs *funcs)
783{
784 connector->helper_private = funcs;
785}
786
787/**
788 * struct drm_plane_helper_funcs - helper operations for planes
789 *
790 * These functions are used by the atomic helpers and by the transitional plane
791 * helpers.
792 */
793struct drm_plane_helper_funcs {
794 /**
795 * @prepare_fb:
796 *
797 * This hook is to prepare a framebuffer for scanout by e.g. pinning
798 * its backing storage or relocating it into a contiguous block of
799 * VRAM. Other possible preparatory work includes flushing caches.
800 *
801 * This function must not block for outstanding rendering, since it is
802 * called in the context of the atomic IOCTL even for async commits to
803 * be able to return any errors to userspace. Instead the recommended
804 * way is to fill out the fence member of the passed-in
805 * &drm_plane_state. If the driver doesn't support native fences then
806 * equivalent functionality should be implemented through private
807 * members in the plane structure.
808 *
809 * The helpers will call @cleanup_fb with matching arguments for every
810 * successful call to this hook.
811 *
812 * This callback is used by the atomic modeset helpers and by the
813 * transitional plane helpers, but it is optional.
814 *
815 * RETURNS:
816 *
817 * 0 on success or one of the following negative error codes allowed by
818 * the atomic_commit hook in &drm_mode_config_funcs. When using helpers
819 * this callback is the only one which can fail an atomic commit,
820 * everything else must complete successfully.
821 */
822 int (*prepare_fb)(struct drm_plane *plane,
823 const struct drm_plane_state *new_state);
824 /**
825 * @cleanup_fb:
826 *
827 * This hook is called to clean up any resources allocated for the given
828 * framebuffer and plane configuration in @prepare_fb.
829 *
830 * This callback is used by the atomic modeset helpers and by the
831 * transitional plane helpers, but it is optional.
832 */
833 void (*cleanup_fb)(struct drm_plane *plane,
834 const struct drm_plane_state *old_state);
835
836 /**
837 * @atomic_check:
838 *
839 * Drivers should check plane specific constraints in this hook.
840 *
841 * When using drm_atomic_helper_check_planes() plane's ->atomic_check()
842 * hooks are called before the ones for CRTCs, which allows drivers to
843 * request shared resources that the CRTC controls here. For more
844 * complicated dependencies the driver can call the provided check helpers
845 * multiple times until the computed state has a final configuration and
846 * everything has been checked.
847 *
848 * This function is also allowed to inspect any other object's state and
849 * can add more state objects to the atomic commit if needed. Care must
850 * be taken though to ensure that state check&compute functions for
851 * these added states are all called, and derived state in other objects
852 * all updated. Again the recommendation is to just call check helpers
853 * until a maximal configuration is reached.
854 *
855 * This callback is used by the atomic modeset helpers and by the
856 * transitional plane helpers, but it is optional.
857 *
858 * NOTE:
859 *
860 * This function is called in the check phase of an atomic update. The
861 * driver is not allowed to change anything outside of the free-standing
862 * state objects passed-in or assembled in the overall &drm_atomic_state
863 * update tracking structure.
864 *
865 * RETURNS:
866 *
867 * 0 on success, -EINVAL if the state or the transition can't be
868 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
869 * attempt to obtain another state object ran into a &drm_modeset_lock
870 * deadlock.
871 */
872 int (*atomic_check)(struct drm_plane *plane,
873 struct drm_plane_state *state);
874
875 /**
876 * @atomic_update:
877 *
878 * Drivers should use this function to update the plane state. This
879 * hook is called in-between the ->atomic_begin() and
880 * ->atomic_flush() of &drm_crtc_helper_funcs.
881 *
882 * Note that the power state of the display pipe when this function is
883 * called depends upon the exact helpers and calling sequence the driver
884 * has picked. See drm_atomic_commit_planes() for a discussion of the
885 * tradeoffs and variants of plane commit helpers.
886 *
887 * This callback is used by the atomic modeset helpers and by the
888 * transitional plane helpers, but it is optional.
889 */
890 void (*atomic_update)(struct drm_plane *plane,
891 struct drm_plane_state *old_state);
892 /**
893 * @atomic_disable:
894 *
895 * Drivers should use this function to unconditionally disable a plane.
896 * This hook is called in-between the ->atomic_begin() and
897 * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to
898 * @atomic_update, which will be called for disabling planes, too, if
899 * the @atomic_disable hook isn't implemented.
900 *
901 * This hook is also useful to disable planes in preparation of a modeset,
902 * by calling drm_atomic_helper_disable_planes_on_crtc() from the
903 * ->disable() hook in &drm_crtc_helper_funcs.
904 *
905 * Note that the power state of the display pipe when this function is
906 * called depends upon the exact helpers and calling sequence the driver
907 * has picked. See drm_atomic_commit_planes() for a discussion of the
908 * tradeoffs and variants of plane commit helpers.
909 *
910 * This callback is used by the atomic modeset helpers and by the
911 * transitional plane helpers, but it is optional.
912 */
913 void (*atomic_disable)(struct drm_plane *plane,
914 struct drm_plane_state *old_state);
915};
916
917/**
918 * drm_plane_helper_add - sets the helper vtable for a plane
919 * @plane: DRM plane
920 * @funcs: helper vtable to set for @plane
921 */
922static inline void drm_plane_helper_add(struct drm_plane *plane,
923 const struct drm_plane_helper_funcs *funcs)
924{
925 plane->helper_private = funcs;
926}
927
928#endif
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 5a7f9d4efb1d..4421f3f4ca8d 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,6 +26,7 @@
26 26
27#include <drm/drm_rect.h> 27#include <drm/drm_rect.h>
28#include <drm/drm_crtc.h> 28#include <drm/drm_crtc.h>
29#include <drm/drm_modeset_helper_vtables.h>
29 30
30/* 31/*
31 * Drivers that don't allow primary plane scaling may pass this macro in place 32 * Drivers that don't allow primary plane scaling may pass this macro in place
@@ -36,46 +37,9 @@
36 */ 37 */
37#define DRM_PLANE_HELPER_NO_SCALING (1<<16) 38#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
38 39
39/**
40 * DOC: plane helpers
41 *
42 * Helper functions to assist with creation and handling of CRTC primary
43 * planes.
44 */
45
46int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, 40int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
47 const struct drm_crtc_funcs *funcs); 41 const struct drm_crtc_funcs *funcs);
48 42
49/**
50 * drm_plane_helper_funcs - helper operations for CRTCs
51 * @prepare_fb: prepare a framebuffer for use by the plane
52 * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
53 * @atomic_check: check that a given atomic state is valid and can be applied
54 * @atomic_update: apply an atomic state to the plane (mandatory)
55 * @atomic_disable: disable the plane
56 *
57 * The helper operations are called by the mid-layer CRTC helper.
58 */
59struct drm_plane_helper_funcs {
60 int (*prepare_fb)(struct drm_plane *plane,
61 const struct drm_plane_state *new_state);
62 void (*cleanup_fb)(struct drm_plane *plane,
63 const struct drm_plane_state *old_state);
64
65 int (*atomic_check)(struct drm_plane *plane,
66 struct drm_plane_state *state);
67 void (*atomic_update)(struct drm_plane *plane,
68 struct drm_plane_state *old_state);
69 void (*atomic_disable)(struct drm_plane *plane,
70 struct drm_plane_state *old_state);
71};
72
73static inline void drm_plane_helper_add(struct drm_plane *plane,
74 const struct drm_plane_helper_funcs *funcs)
75{
76 plane->helper_private = funcs;
77}
78
79int drm_plane_helper_check_update(struct drm_plane *plane, 43int drm_plane_helper_check_update(struct drm_plane *plane,
80 struct drm_crtc *crtc, 44 struct drm_crtc *crtc,
81 struct drm_framebuffer *fb, 45 struct drm_framebuffer *fb,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c768ddfbe53c..afae2316bd43 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -383,6 +383,16 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
383 */ 383 */
384extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); 384extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
385 385
386/**
387 * ttm_bo_move_to_lru_tail
388 *
389 * @bo: The buffer object.
390 *
391 * Move this BO to the tail of all lru lists used to lookup and reserve an
392 * object. This function must be called with struct ttm_bo_global::lru_lock
393 * held, and is used to make a BO less likely to be considered for eviction.
394 */
395extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
386 396
387/** 397/**
388 * ttm_bo_lock_delayed_workqueue 398 * ttm_bo_lock_delayed_workqueue
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 813042cede57..3d4bf08aa21f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -826,10 +826,10 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
826 * reserved, the validation sequence is checked against the validation 826 * reserved, the validation sequence is checked against the validation
827 * sequence of the process currently reserving the buffer, 827 * sequence of the process currently reserving the buffer,
828 * and if the current validation sequence is greater than that of the process 828 * and if the current validation sequence is greater than that of the process
829 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps 829 * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
830 * waiting for the buffer to become unreserved, after which it retries 830 * waiting for the buffer to become unreserved, after which it retries
831 * reserving. 831 * reserving.
832 * The caller should, when receiving an -EAGAIN error 832 * The caller should, when receiving an -EDEADLK error
833 * release all its buffer reservations, wait for @bo to become unreserved, and 833 * release all its buffer reservations, wait for @bo to become unreserved, and
834 * then rerun the validation with the same validation sequence. This procedure 834 * then rerun the validation with the same validation sequence. This procedure
835 * will always guarantee that the process with the lowest validation sequence 835 * will always guarantee that the process with the lowest validation sequence
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2b8ed123ad36..defeaac0745f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
107 */ 107 */
108static inline __u32 rol32(__u32 word, unsigned int shift) 108static inline __u32 rol32(__u32 word, unsigned int shift)
109{ 109{
110 return (word << shift) | (word >> (32 - shift)); 110 return (word << shift) | (word >> ((-shift) & 31));
111} 111}
112 112
113/** 113/**
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 60d44b26276d..06b77f9dd3f2 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -90,7 +90,6 @@ enum {
90 */ 90 */
91struct cgroup_file { 91struct cgroup_file {
92 /* do not access any fields from outside cgroup core */ 92 /* do not access any fields from outside cgroup core */
93 struct list_head node; /* anchored at css->files */
94 struct kernfs_node *kn; 93 struct kernfs_node *kn;
95}; 94};
96 95
@@ -134,9 +133,6 @@ struct cgroup_subsys_state {
134 */ 133 */
135 u64 serial_nr; 134 u64 serial_nr;
136 135
137 /* all cgroup_files associated with this css */
138 struct list_head files;
139
140 /* percpu_ref killing and RCU release */ 136 /* percpu_ref killing and RCU release */
141 struct rcu_head rcu_head; 137 struct rcu_head rcu_head;
142 struct work_struct destroy_work; 138 struct work_struct destroy_work;
@@ -426,12 +422,9 @@ struct cgroup_subsys {
426 void (*css_reset)(struct cgroup_subsys_state *css); 422 void (*css_reset)(struct cgroup_subsys_state *css);
427 void (*css_e_css_changed)(struct cgroup_subsys_state *css); 423 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
428 424
429 int (*can_attach)(struct cgroup_subsys_state *css, 425 int (*can_attach)(struct cgroup_taskset *tset);
430 struct cgroup_taskset *tset); 426 void (*cancel_attach)(struct cgroup_taskset *tset);
431 void (*cancel_attach)(struct cgroup_subsys_state *css, 427 void (*attach)(struct cgroup_taskset *tset);
432 struct cgroup_taskset *tset);
433 void (*attach)(struct cgroup_subsys_state *css,
434 struct cgroup_taskset *tset);
435 int (*can_fork)(struct task_struct *task, void **priv_p); 428 int (*can_fork)(struct task_struct *task, void **priv_p);
436 void (*cancel_fork)(struct task_struct *task, void *priv); 429 void (*cancel_fork)(struct task_struct *task, void *priv);
437 void (*fork)(struct task_struct *task, void *priv); 430 void (*fork)(struct task_struct *task, void *priv);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 22e3754f89c5..cb91b44f5f78 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -88,6 +88,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
88int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); 88int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
89int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); 89int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
90int cgroup_rm_cftypes(struct cftype *cfts); 90int cgroup_rm_cftypes(struct cftype *cfts);
91void cgroup_file_notify(struct cgroup_file *cfile);
91 92
92char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); 93char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
93int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); 94int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -119,8 +120,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
119struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos, 120struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
120 struct cgroup_subsys_state *css); 121 struct cgroup_subsys_state *css);
121 122
122struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); 123struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
123struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); 124 struct cgroup_subsys_state **dst_cssp);
125struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
126 struct cgroup_subsys_state **dst_cssp);
124 127
125void css_task_iter_start(struct cgroup_subsys_state *css, 128void css_task_iter_start(struct cgroup_subsys_state *css,
126 struct css_task_iter *it); 129 struct css_task_iter *it);
@@ -235,30 +238,39 @@ void css_task_iter_end(struct css_task_iter *it);
235/** 238/**
236 * cgroup_taskset_for_each - iterate cgroup_taskset 239 * cgroup_taskset_for_each - iterate cgroup_taskset
237 * @task: the loop cursor 240 * @task: the loop cursor
241 * @dst_css: the destination css
238 * @tset: taskset to iterate 242 * @tset: taskset to iterate
239 * 243 *
240 * @tset may contain multiple tasks and they may belong to multiple 244 * @tset may contain multiple tasks and they may belong to multiple
241 * processes. When there are multiple tasks in @tset, if a task of a 245 * processes.
242 * process is in @tset, all tasks of the process are in @tset. Also, all 246 *
243 * are guaranteed to share the same source and destination csses. 247 * On the v2 hierarchy, there may be tasks from multiple processes and they
248 * may not share the source or destination csses.
249 *
250 * On traditional hierarchies, when there are multiple tasks in @tset, if a
251 * task of a process is in @tset, all tasks of the process are in @tset.
252 * Also, all are guaranteed to share the same source and destination csses.
244 * 253 *
245 * Iteration is not in any specific order. 254 * Iteration is not in any specific order.
246 */ 255 */
247#define cgroup_taskset_for_each(task, tset) \ 256#define cgroup_taskset_for_each(task, dst_css, tset) \
248 for ((task) = cgroup_taskset_first((tset)); (task); \ 257 for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
249 (task) = cgroup_taskset_next((tset))) 258 (task); \
259 (task) = cgroup_taskset_next((tset), &(dst_css)))
250 260
251/** 261/**
252 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset 262 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
253 * @leader: the loop cursor 263 * @leader: the loop cursor
264 * @dst_css: the destination css
254 * @tset: taskset to iterate 265 *
255 * 266 *
256 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset 267 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
257 * may not contain any. 268 * may not contain any.
258 */ 269 */
259#define cgroup_taskset_for_each_leader(leader, tset) \ 270#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
260 for ((leader) = cgroup_taskset_first((tset)); (leader); \ 271 for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
261 (leader) = cgroup_taskset_next((tset))) \ 272 (leader); \
273 (leader) = cgroup_taskset_next((tset), &(dst_css))) \
262 if ((leader) != (leader)->group_leader) \ 274 if ((leader) != (leader)->group_leader) \
263 ; \ 275 ; \
264 else 276 else
@@ -516,19 +528,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
516 pr_cont_kernfs_path(cgrp->kn); 528 pr_cont_kernfs_path(cgrp->kn);
517} 529}
518 530
519/**
520 * cgroup_file_notify - generate a file modified event for a cgroup_file
521 * @cfile: target cgroup_file
522 *
523 * @cfile must have been obtained by setting cftype->file_offset.
524 */
525static inline void cgroup_file_notify(struct cgroup_file *cfile)
526{
527 /* might not have been created due to one of the CFTYPE selector flags */
528 if (cfile->kn)
529 kernfs_notify(cfile->kn);
530}
531
532#else /* !CONFIG_CGROUPS */ 531#else /* !CONFIG_CGROUPS */
533 532
534struct cgroup_subsys_state; 533struct cgroup_subsys_state;
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 7be22da321f3..a4cf57cd0f75 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -29,7 +29,11 @@
29/* A few generic types ... taken from ses-2 */ 29/* A few generic types ... taken from ses-2 */
30enum enclosure_component_type { 30enum enclosure_component_type {
31 ENCLOSURE_COMPONENT_DEVICE = 0x01, 31 ENCLOSURE_COMPONENT_DEVICE = 0x01,
32 ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
33 ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
34 ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
32 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, 35 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
36 ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
33}; 37};
34 38
35/* ses-2 common element status */ 39/* ses-2 common element status */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c9ae0c6ec050..d5d798b35c1f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -330,6 +330,7 @@ struct rdists {
330}; 330};
331 331
332struct irq_domain; 332struct irq_domain;
333struct device_node;
333int its_cpu_init(void); 334int its_cpu_init(void);
334int its_init(struct device_node *node, struct rdists *rdists, 335int its_init(struct device_node *node, struct rdists *rdists,
335 struct irq_domain *domain); 336 struct irq_domain *domain);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 8dde55974f18..0536524bb9eb 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -5,7 +5,7 @@
5 * Jump label support 5 * Jump label support
6 * 6 *
7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com> 7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
9 * 9 *
10 * DEPRECATED API: 10 * DEPRECATED API:
11 * 11 *
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index d0a1f99e24e3..4894c6888bc6 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -25,7 +25,7 @@
25 25
26#ifdef CONFIG_DEBUG_KMEMLEAK 26#ifdef CONFIG_DEBUG_KMEMLEAK
27 27
28extern void kmemleak_init(void) __ref; 28extern void kmemleak_init(void) __init;
29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
30 gfp_t gfp) __ref; 30 gfp_t gfp) __ref;
31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, 31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83577f8fd15b..600c1e0626a5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
210 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ 210 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
211 /* (doesn't imply presence) */ 211 /* (doesn't imply presence) */
212 ATA_FLAG_SATA = (1 << 1), 212 ATA_FLAG_SATA = (1 << 1),
213 ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
213 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ 214 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
214 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ 215 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
215 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ 216 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index c6916aec43b6..034117b3be5f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,9 +50,16 @@ enum {
50 NVM_IO_DUAL_ACCESS = 0x1, 50 NVM_IO_DUAL_ACCESS = 0x1,
51 NVM_IO_QUAD_ACCESS = 0x2, 51 NVM_IO_QUAD_ACCESS = 0x2,
52 52
53 /* NAND Access Modes */
53 NVM_IO_SUSPEND = 0x80, 54 NVM_IO_SUSPEND = 0x80,
54 NVM_IO_SLC_MODE = 0x100, 55 NVM_IO_SLC_MODE = 0x100,
55 NVM_IO_SCRAMBLE_DISABLE = 0x200, 56 NVM_IO_SCRAMBLE_DISABLE = 0x200,
57
58 /* Block Types */
59 NVM_BLK_T_FREE = 0x0,
60 NVM_BLK_T_BAD = 0x1,
61 NVM_BLK_T_DEV = 0x2,
62 NVM_BLK_T_HOST = 0x4,
56}; 63};
57 64
58struct nvm_id_group { 65struct nvm_id_group {
@@ -176,17 +183,17 @@ struct nvm_block;
176 183
177typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); 184typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
178typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); 185typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
179typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); 186typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
180typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, 187typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
181 nvm_l2p_update_fn *, void *); 188 nvm_l2p_update_fn *, void *);
182typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, 189typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
183 nvm_bb_update_fn *, void *); 190 nvm_bb_update_fn *, void *);
184typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); 191typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
185typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); 192typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
186typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *); 193typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
187typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *); 194typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
188typedef void (nvm_destroy_dma_pool_fn)(void *); 195typedef void (nvm_destroy_dma_pool_fn)(void *);
189typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t, 196typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
190 dma_addr_t *); 197 dma_addr_t *);
191typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); 198typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
192 199
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 70400dc7660f..c57e424d914b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -2,7 +2,7 @@
2 * Runtime locking correctness validator 2 * Runtime locking correctness validator
3 * 3 *
4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
6 * 6 *
7 * see Documentation/locking/lockdep-design.txt for more details. 7 * see Documentation/locking/lockdep-design.txt for more details.
8 */ 8 */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7501626ab529..d3133be12d92 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -427,6 +427,17 @@ enum {
427}; 427};
428 428
429enum { 429enum {
430 /*
431 * Max wqe size for rdma read is 512 bytes, so this
432 * limits our max_sge_rd as the wqe needs to fit:
433 * - ctrl segment (16 bytes)
434 * - rdma segment (16 bytes)
435 * - scatter elements (16 bytes each)
436 */
437 MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
438};
439
440enum {
430 MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14, 441 MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
431 MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15, 442 MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
432 MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16, 443 MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 877ef226f90f..772362adf471 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,6 +1,7 @@
1#ifndef LINUX_MM_DEBUG_H 1#ifndef LINUX_MM_DEBUG_H
2#define LINUX_MM_DEBUG_H 1 2#define LINUX_MM_DEBUG_H 1
3 3
4#include <linux/bug.h>
4#include <linux/stringify.h> 5#include <linux/stringify.h>
5 6
6struct page; 7struct page;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3b5d134e945a..3143c847bddb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2084,7 +2084,7 @@ struct pcpu_sw_netstats {
2084}) 2084})
2085 2085
2086#define netdev_alloc_pcpu_stats(type) \ 2086#define netdev_alloc_pcpu_stats(type) \
2087 __netdev_alloc_pcpu_stats(type, GFP_KERNEL); 2087 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2088 2088
2089#include <linux/notifier.h> 2089#include <linux/notifier.h>
2090 2090
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 249d1bb01e03..5646b24bfc64 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,7 +14,7 @@ struct nfnl_callback {
14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb, 14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
15 const struct nlmsghdr *nlh, 15 const struct nlmsghdr *nlh,
16 const struct nlattr * const cda[]); 16 const struct nlattr * const cda[]);
17 int (*call_batch)(struct sock *nl, struct sk_buff *skb, 17 int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
18 const struct nlmsghdr *nlh, 18 const struct nlmsghdr *nlh,
19 const struct nlattr * const cda[]); 19 const struct nlattr * const cda[]);
20 const struct nla_policy *policy; /* netlink attribute policy */ 20 const struct nla_policy *policy; /* netlink attribute policy */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 039f2eec49ce..1e0deb8e8494 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -46,12 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index);
46extern int of_irq_get_byname(struct device_node *dev, const char *name); 46extern int of_irq_get_byname(struct device_node *dev, const char *name);
47extern int of_irq_to_resource_table(struct device_node *dev, 47extern int of_irq_to_resource_table(struct device_node *dev,
48 struct resource *res, int nr_irqs); 48 struct resource *res, int nr_irqs);
49extern struct device_node *of_irq_find_parent(struct device_node *child);
49extern struct irq_domain *of_msi_get_domain(struct device *dev, 50extern struct irq_domain *of_msi_get_domain(struct device *dev,
50 struct device_node *np, 51 struct device_node *np,
51 enum irq_domain_bus_token token); 52 enum irq_domain_bus_token token);
52extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, 53extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
53 u32 rid); 54 u32 rid);
54extern void of_msi_configure(struct device *dev, struct device_node *np); 55extern void of_msi_configure(struct device *dev, struct device_node *np);
56u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
55#else 57#else
56static inline int of_irq_count(struct device_node *dev) 58static inline int of_irq_count(struct device_node *dev)
57{ 59{
@@ -70,6 +72,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
70{ 72{
71 return 0; 73 return 0;
72} 74}
75static inline void *of_irq_find_parent(struct device_node *child)
76{
77 return NULL;
78}
79
73static inline struct irq_domain *of_msi_get_domain(struct device *dev, 80static inline struct irq_domain *of_msi_get_domain(struct device *dev,
74 struct device_node *np, 81 struct device_node *np,
75 enum irq_domain_bus_token token) 82 enum irq_domain_bus_token token)
@@ -84,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
84static inline void of_msi_configure(struct device *dev, struct device_node *np) 91static inline void of_msi_configure(struct device *dev, struct device_node *np)
85{ 92{
86} 93}
94static inline u32 of_msi_map_rid(struct device *dev,
95 struct device_node *msi_np, u32 rid_in)
96{
97 return rid_in;
98}
87#endif 99#endif
88 100
89#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) 101#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
@@ -93,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np)
93 * so declare it here regardless of the CONFIG_OF_IRQ setting. 105 * so declare it here regardless of the CONFIG_OF_IRQ setting.
94 */ 106 */
95extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); 107extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
96u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
97 108
98#else /* !CONFIG_OF && !CONFIG_SPARC */ 109#else /* !CONFIG_OF && !CONFIG_SPARC */
99static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 110static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -101,12 +112,6 @@ static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
101{ 112{
102 return 0; 113 return 0;
103} 114}
104
105static inline u32 of_msi_map_rid(struct device *dev,
106 struct device_node *msi_np, u32 rid_in)
107{
108 return rid_in;
109}
110#endif /* !CONFIG_OF */ 115#endif /* !CONFIG_OF */
111 116
112#endif /* __OF_IRQ_H */ 117#endif /* __OF_IRQ_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33bcdc9..f9828a48f16a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
697 * if there is no cgroup event for the current CPU context. 697 * if there is no cgroup event for the current CPU context.
698 */ 698 */
699static inline struct perf_cgroup * 699static inline struct perf_cgroup *
700perf_cgroup_from_task(struct task_struct *task) 700perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
701{ 701{
702 return container_of(task_css(task, perf_event_cgrp_id), 702 return container_of(task_css_check(task, perf_event_cgrp_id,
703 ctx ? lockdep_is_held(&ctx->lock)
704 : true),
703 struct perf_cgroup, css); 705 struct perf_cgroup, css);
704} 706}
705#endif /* CONFIG_CGROUP_PERF */ 707#endif /* CONFIG_CGROUP_PERF */
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index e2878baeb90e..4299f4ba03bd 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -72,7 +72,7 @@ struct edma_soc_info {
72 struct edma_rsv_info *rsv; 72 struct edma_rsv_info *rsv;
73 73
74 /* List of channels allocated for memcpy, terminated with -1 */ 74 /* List of channels allocated for memcpy, terminated with -1 */
75 s16 *memcpy_channels; 75 s32 *memcpy_channels;
76 76
77 s8 (*queue_priority_mapping)[2]; 77 s8 (*queue_priority_mapping)[2];
78 const s16 (*xbar_chans)[2]; 78 const s16 (*xbar_chans)[2];
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5440f64d2942..21221338ad18 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * FLoating proportions 2 * FLoating proportions
3 * 3 *
4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
5 * 5 *
6 * This file contains the public data structure and API definitions. 6 * This file contains the public data structure and API definitions.
7 */ 7 */
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 6a4347639c03..1d1ba2c5ee7a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -9,6 +9,8 @@
9#ifndef __COMMON_HSI__ 9#ifndef __COMMON_HSI__
10#define __COMMON_HSI__ 10#define __COMMON_HSI__
11 11
12#define CORE_SPQE_PAGE_SIZE_BYTES 4096
13
12#define FW_MAJOR_VERSION 8 14#define FW_MAJOR_VERSION 8
13#define FW_MINOR_VERSION 4 15#define FW_MINOR_VERSION 4
14#define FW_REVISION_VERSION 2 16#define FW_REVISION_VERSION 2
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index b920c3605c46..41b9049b57e2 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
111 used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - 111 used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
112 (u32)p_chain->cons_idx; 112 (u32)p_chain->cons_idx;
113 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) 113 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
114 used -= (used / p_chain->elem_per_page); 114 used -= p_chain->prod_idx / p_chain->elem_per_page -
115 p_chain->cons_idx / p_chain->elem_per_page;
115 116
116 return p_chain->capacity - used; 117 return p_chain->capacity - used;
117} 118}
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 843ceca9a21e..e50b31d18462 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -19,6 +19,7 @@
19 19
20#include <linux/atomic.h> 20#include <linux/atomic.h>
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/err.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
23#include <linux/jhash.h> 24#include <linux/jhash.h>
24#include <linux/list_nulls.h> 25#include <linux/list_nulls.h>
@@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
339int rhashtable_init(struct rhashtable *ht, 340int rhashtable_init(struct rhashtable *ht,
340 const struct rhashtable_params *params); 341 const struct rhashtable_params *params);
341 342
342int rhashtable_insert_slow(struct rhashtable *ht, const void *key, 343struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
343 struct rhash_head *obj, 344 const void *key,
344 struct bucket_table *old_tbl); 345 struct rhash_head *obj,
345int rhashtable_insert_rehash(struct rhashtable *ht); 346 struct bucket_table *old_tbl);
347int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
346 348
347int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); 349int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
348void rhashtable_walk_exit(struct rhashtable_iter *iter); 350void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -598,9 +600,11 @@ restart:
598 600
599 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); 601 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
600 if (unlikely(new_tbl)) { 602 if (unlikely(new_tbl)) {
601 err = rhashtable_insert_slow(ht, key, obj, new_tbl); 603 tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
602 if (err == -EAGAIN) 604 if (!IS_ERR_OR_NULL(tbl))
603 goto slow_path; 605 goto slow_path;
606
607 err = PTR_ERR(tbl);
604 goto out; 608 goto out;
605 } 609 }
606 610
@@ -611,7 +615,7 @@ restart:
611 if (unlikely(rht_grow_above_100(ht, tbl))) { 615 if (unlikely(rht_grow_above_100(ht, tbl))) {
612slow_path: 616slow_path:
613 spin_unlock_bh(lock); 617 spin_unlock_bh(lock);
614 err = rhashtable_insert_rehash(ht); 618 err = rhashtable_insert_rehash(ht, tbl);
615 rcu_read_unlock(); 619 rcu_read_unlock();
616 if (err) 620 if (err)
617 return err; 621 return err;
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 0adedca24c5b..0e1b1540597a 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
99 * grabbing every spinlock (and more). So the "read" side to such a 99 * grabbing every spinlock (and more). So the "read" side to such a
100 * lock is anything which disables preemption. 100 * lock is anything which disables preemption.
101 */ 101 */
102#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) 102#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
103 103
104/** 104/**
105 * stop_machine: freeze the machine on all CPUs and run this function 105 * stop_machine: freeze the machine on all CPUs and run this function
@@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
118 118
119int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, 119int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
120 const struct cpumask *cpus); 120 const struct cpumask *cpus);
121#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 121#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
122 122
123static inline int stop_machine(cpu_stop_fn_t fn, void *data, 123static inline int stop_machine(cpu_stop_fn_t fn, void *data,
124 const struct cpumask *cpus) 124 const struct cpumask *cpus)
@@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
137 return stop_machine(fn, data, cpus); 137 return stop_machine(fn, data, cpus);
138} 138}
139 139
140#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 140#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
141#endif /* _LINUX_STOP_MACHINE */ 141#endif /* _LINUX_STOP_MACHINE */
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 0bdc72f36905..4a29c75b146e 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -21,7 +21,7 @@
21 * Authors: 21 * Authors:
22 * Srikar Dronamraju 22 * Srikar Dronamraju
23 * Jim Keniston 23 * Jim Keniston
24 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 24 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
25 */ 25 */
26 26
27#include <linux/errno.h> 27#include <linux/errno.h>
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 9948c874e3f1..1d0043dc34e4 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -47,4 +47,7 @@
47/* device generates spurious wakeup, ignore remote wakeup capability */ 47/* device generates spurious wakeup, ignore remote wakeup capability */
48#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) 48#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
49 49
50/* device can't handle Link Power Management */
51#define USB_QUIRK_NO_LPM BIT(10)
52
50#endif /* __LINUX_USB_QUIRKS_H */ 53#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 610a86a892b8..ddb440975382 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -44,9 +44,6 @@ struct vfio_device_ops {
44 void (*request)(void *device_data, unsigned int count); 44 void (*request)(void *device_data, unsigned int count);
45}; 45};
46 46
47extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
48extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
49
50extern int vfio_add_group_dev(struct device *dev, 47extern int vfio_add_group_dev(struct device *dev,
51 const struct vfio_device_ops *ops, 48 const struct vfio_device_ops *ops,
52 void *device_data); 49 void *device_data);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f963a9..513b36f04dfd 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -145,7 +145,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
145 list_del(&old->task_list); 145 list_del(&old->task_list);
146} 146}
147 147
148typedef int wait_bit_action_f(struct wait_bit_key *); 148typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); 150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
@@ -960,10 +960,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
960 } while (0) 960 } while (0)
961 961
962 962
963extern int bit_wait(struct wait_bit_key *); 963extern int bit_wait(struct wait_bit_key *, int);
964extern int bit_wait_io(struct wait_bit_key *); 964extern int bit_wait_io(struct wait_bit_key *, int);
965extern int bit_wait_timeout(struct wait_bit_key *); 965extern int bit_wait_timeout(struct wait_bit_key *, int);
966extern int bit_wait_io_timeout(struct wait_bit_key *); 966extern int bit_wait_io_timeout(struct wait_bit_key *, int);
967 967
968/** 968/**
969 * wait_on_bit - wait for a bit to be cleared 969 * wait_on_bit - wait for a bit to be cleared
diff --git a/include/net/dst.h b/include/net/dst.h
index 1279f9b09791..c7329dcd90cc 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -322,6 +322,39 @@ static inline void skb_dst_force(struct sk_buff *skb)
322 } 322 }
323} 323}
324 324
325/**
326 * dst_hold_safe - Take a reference on a dst if possible
327 * @dst: pointer to dst entry
328 *
329 * This helper returns false if it could not safely
330 * take a reference on a dst.
331 */
332static inline bool dst_hold_safe(struct dst_entry *dst)
333{
334 if (dst->flags & DST_NOCACHE)
335 return atomic_inc_not_zero(&dst->__refcnt);
336 dst_hold(dst);
337 return true;
338}
339
340/**
341 * skb_dst_force_safe - makes sure skb dst is refcounted
342 * @skb: buffer
343 *
344 * If dst is not yet refcounted and not destroyed, grab a ref on it.
345 */
346static inline void skb_dst_force_safe(struct sk_buff *skb)
347{
348 if (skb_dst_is_noref(skb)) {
349 struct dst_entry *dst = skb_dst(skb);
350
351 if (!dst_hold_safe(dst))
352 dst = NULL;
353
354 skb->_skb_refdst = (unsigned long)dst;
355 }
356}
357
325 358
326/** 359/**
327 * __skb_tunnel_rx - prepare skb for rx reinsert 360 * __skb_tunnel_rx - prepare skb for rx reinsert
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 2134e6d815bc..625bdf95d673 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -210,18 +210,37 @@ struct inet_sock {
210#define IP_CMSG_ORIGDSTADDR BIT(6) 210#define IP_CMSG_ORIGDSTADDR BIT(6)
211#define IP_CMSG_CHECKSUM BIT(7) 211#define IP_CMSG_CHECKSUM BIT(7)
212 212
213/* SYNACK messages might be attached to request sockets. 213/**
214 * sk_to_full_sk - Access to a full socket
215 * @sk: pointer to a socket
216 *
217 * SYNACK messages might be attached to request sockets.
214 * Some places want to reach the listener in this case. 218 * Some places want to reach the listener in this case.
215 */ 219 */
216static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) 220static inline struct sock *sk_to_full_sk(struct sock *sk)
217{ 221{
218 struct sock *sk = skb->sk; 222#ifdef CONFIG_INET
219
220 if (sk && sk->sk_state == TCP_NEW_SYN_RECV) 223 if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
221 sk = inet_reqsk(sk)->rsk_listener; 224 sk = inet_reqsk(sk)->rsk_listener;
225#endif
226 return sk;
227}
228
229/* sk_to_full_sk() variant with a const argument */
230static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
231{
232#ifdef CONFIG_INET
233 if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
234 sk = ((const struct request_sock *)sk)->rsk_listener;
235#endif
222 return sk; 236 return sk;
223} 237}
224 238
239static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
240{
241 return sk_to_full_sk(skb->sk);
242}
243
225static inline struct inet_sock *inet_sk(const struct sock *sk) 244static inline struct inet_sock *inet_sk(const struct sock *sk)
226{ 245{
227 return (struct inet_sock *)sk; 246 return (struct inet_sock *)sk;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 4a6009d4486b..235c7811a86a 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -78,6 +78,7 @@ void inet_initpeers(void) __init;
78static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) 78static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
79{ 79{
80 iaddr->a4.addr = ip; 80 iaddr->a4.addr = ip;
81 iaddr->a4.vif = 0;
81 iaddr->family = AF_INET; 82 iaddr->family = AF_INET;
82} 83}
83 84
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 7bbb71081aeb..eea9bdeecba2 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1493,7 +1493,8 @@ struct sctp_association {
1493 * : SACK's are not delayed (see Section 6). 1493 * : SACK's are not delayed (see Section 6).
1494 */ 1494 */
1495 __u8 sack_needed:1, /* Do we need to sack the peer? */ 1495 __u8 sack_needed:1, /* Do we need to sack the peer? */
1496 sack_generation:1; 1496 sack_generation:1,
1497 zero_window_announced:1;
1497 __u32 sack_cnt; 1498 __u32 sack_cnt;
1498 1499
1499 __u32 adaptation_ind; /* Adaptation Code point. */ 1500 __u32 adaptation_ind; /* Adaptation Code point. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 52d27ee924f4..14d3c0734007 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -388,7 +388,7 @@ struct sock {
388 struct socket_wq *sk_wq_raw; 388 struct socket_wq *sk_wq_raw;
389 }; 389 };
390#ifdef CONFIG_XFRM 390#ifdef CONFIG_XFRM
391 struct xfrm_policy *sk_policy[2]; 391 struct xfrm_policy __rcu *sk_policy[2];
392#endif 392#endif
393 struct dst_entry *sk_rx_dst; 393 struct dst_entry *sk_rx_dst;
394 struct dst_entry __rcu *sk_dst_cache; 394 struct dst_entry __rcu *sk_dst_cache;
@@ -404,6 +404,7 @@ struct sock {
404 sk_userlocks : 4, 404 sk_userlocks : 4,
405 sk_protocol : 8, 405 sk_protocol : 8,
406 sk_type : 16; 406 sk_type : 16;
407#define SK_PROTOCOL_MAX U8_MAX
407 kmemcheck_bitfield_end(flags); 408 kmemcheck_bitfield_end(flags);
408 int sk_wmem_queued; 409 int sk_wmem_queued;
409 gfp_t sk_allocation; 410 gfp_t sk_allocation;
@@ -740,6 +741,8 @@ enum sock_flags {
740 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ 741 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
741}; 742};
742 743
744#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
745
743static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) 746static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
744{ 747{
745 nsk->sk_flags = osk->sk_flags; 748 nsk->sk_flags = osk->sk_flags;
@@ -814,7 +817,7 @@ void sk_stream_write_space(struct sock *sk);
814static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) 817static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
815{ 818{
816 /* dont let skb dst not refcounted, we are going to leave rcu lock */ 819 /* dont let skb dst not refcounted, we are going to leave rcu lock */
817 skb_dst_force(skb); 820 skb_dst_force_safe(skb);
818 821
819 if (!sk->sk_backlog.tail) 822 if (!sk->sk_backlog.tail)
820 sk->sk_backlog.head = skb; 823 sk->sk_backlog.head = skb;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index c1c899c3a51b..e289ada6adf6 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -79,7 +79,7 @@ struct vxlanhdr {
79}; 79};
80 80
81/* VXLAN header flags. */ 81/* VXLAN header flags. */
82#define VXLAN_HF_RCO BIT(24) 82#define VXLAN_HF_RCO BIT(21)
83#define VXLAN_HF_VNI BIT(27) 83#define VXLAN_HF_VNI BIT(27)
84#define VXLAN_HF_GBP BIT(31) 84#define VXLAN_HF_GBP BIT(31)
85 85
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 4a9c21f9b4ea..d6f6e5006ee9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -548,6 +548,7 @@ struct xfrm_policy {
548 u16 family; 548 u16 family;
549 struct xfrm_sec_ctx *security; 549 struct xfrm_sec_ctx *security;
550 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; 550 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
551 struct rcu_head rcu;
551}; 552};
552 553
553static inline struct net *xp_net(const struct xfrm_policy *xp) 554static inline struct net *xp_net(const struct xfrm_policy *xp)
@@ -1141,12 +1142,14 @@ static inline int xfrm6_route_forward(struct sk_buff *skb)
1141 return xfrm_route_forward(skb, AF_INET6); 1142 return xfrm_route_forward(skb, AF_INET6);
1142} 1143}
1143 1144
1144int __xfrm_sk_clone_policy(struct sock *sk); 1145int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1145 1146
1146static inline int xfrm_sk_clone_policy(struct sock *sk) 1147static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1147{ 1148{
1148 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1])) 1149 sk->sk_policy[0] = NULL;
1149 return __xfrm_sk_clone_policy(sk); 1150 sk->sk_policy[1] = NULL;
1151 if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1152 return __xfrm_sk_clone_policy(sk, osk);
1150 return 0; 1153 return 0;
1151} 1154}
1152 1155
@@ -1154,12 +1157,16 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1154 1157
1155static inline void xfrm_sk_free_policy(struct sock *sk) 1158static inline void xfrm_sk_free_policy(struct sock *sk)
1156{ 1159{
1157 if (unlikely(sk->sk_policy[0] != NULL)) { 1160 struct xfrm_policy *pol;
1158 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX); 1161
1162 pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1163 if (unlikely(pol != NULL)) {
1164 xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1159 sk->sk_policy[0] = NULL; 1165 sk->sk_policy[0] = NULL;
1160 } 1166 }
1161 if (unlikely(sk->sk_policy[1] != NULL)) { 1167 pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1162 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1); 1168 if (unlikely(pol != NULL)) {
1169 xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1163 sk->sk_policy[1] = NULL; 1170 sk->sk_policy[1] = NULL;
1164 } 1171 }
1165} 1172}
@@ -1169,7 +1176,7 @@ void xfrm_garbage_collect(struct net *net);
1169#else 1176#else
1170 1177
1171static inline void xfrm_sk_free_policy(struct sock *sk) {} 1178static inline void xfrm_sk_free_policy(struct sock *sk) {}
1172static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; } 1179static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1173static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } 1180static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1174static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } 1181static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1175static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) 1182static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 188df91d5851..ec9b44dd3d80 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -237,6 +237,8 @@ struct ib_vendor_mad {
237 u8 data[IB_MGMT_VENDOR_DATA]; 237 u8 data[IB_MGMT_VENDOR_DATA];
238}; 238};
239 239
240#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)
241
240struct ib_class_port_info { 242struct ib_class_port_info {
241 u8 base_version; 243 u8 base_version;
242 u8 class_version; 244 u8 class_version;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9a68a19532ba..120da1d7f57e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1271,6 +1271,7 @@ struct ib_uobject {
1271 int id; /* index into kernel idr */ 1271 int id; /* index into kernel idr */
1272 struct kref ref; 1272 struct kref ref;
1273 struct rw_semaphore mutex; /* protects .live */ 1273 struct rw_semaphore mutex; /* protects .live */
1274 struct rcu_head rcu; /* kfree_rcu() overhead */
1274 int live; 1275 int live;
1275}; 1276};
1276 1277
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 2ae8812d7b1a..94dc6a9772e0 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -93,6 +93,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
93#define AZX_REG_HSW_EM4 0x100c 93#define AZX_REG_HSW_EM4 0x100c
94#define AZX_REG_HSW_EM5 0x1010 94#define AZX_REG_HSW_EM5 0x1010
95 95
96/* Skylake/Broxton display HD-A controller Extended Mode registers */
97#define AZX_REG_SKL_EM4L 0x1040
98
96/* PCI space */ 99/* PCI space */
97#define AZX_PCIREG_TCSEL 0x44 100#define AZX_PCIREG_TCSEL 0x44
98 101
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 38d437096c35..9355dd8eff3b 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -3,6 +3,7 @@ header-y += drm.h
3header-y += drm_fourcc.h 3header-y += drm_fourcc.h
4header-y += drm_mode.h 4header-y += drm_mode.h
5header-y += drm_sarea.h 5header-y += drm_sarea.h
6header-y += amdgpu_drm.h
6header-y += exynos_drm.h 7header-y += exynos_drm.h
7header-y += i810_drm.h 8header-y += i810_drm.h
8header-y += i915_drm.h 9header-y += i915_drm.h
@@ -17,4 +18,5 @@ header-y += tegra_drm.h
17header-y += via_drm.h 18header-y += via_drm.h
18header-y += vmwgfx_drm.h 19header-y += vmwgfx_drm.h
19header-y += msm_drm.h 20header-y += msm_drm.h
21header-y += vc4_drm.h
20header-y += virtgpu_drm.h 22header-y += virtgpu_drm.h
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index e52933a73580..453a76af123c 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -76,19 +76,19 @@
76 76
77struct drm_amdgpu_gem_create_in { 77struct drm_amdgpu_gem_create_in {
78 /** the requested memory size */ 78 /** the requested memory size */
79 uint64_t bo_size; 79 __u64 bo_size;
80 /** physical start_addr alignment in bytes for some HW requirements */ 80 /** physical start_addr alignment in bytes for some HW requirements */
81 uint64_t alignment; 81 __u64 alignment;
82 /** the requested memory domains */ 82 /** the requested memory domains */
83 uint64_t domains; 83 __u64 domains;
84 /** allocation flags */ 84 /** allocation flags */
85 uint64_t domain_flags; 85 __u64 domain_flags;
86}; 86};
87 87
88struct drm_amdgpu_gem_create_out { 88struct drm_amdgpu_gem_create_out {
89 /** returned GEM object handle */ 89 /** returned GEM object handle */
90 uint32_t handle; 90 __u32 handle;
91 uint32_t _pad; 91 __u32 _pad;
92}; 92};
93 93
94union drm_amdgpu_gem_create { 94union drm_amdgpu_gem_create {
@@ -105,28 +105,28 @@ union drm_amdgpu_gem_create {
105 105
106struct drm_amdgpu_bo_list_in { 106struct drm_amdgpu_bo_list_in {
107 /** Type of operation */ 107 /** Type of operation */
108 uint32_t operation; 108 __u32 operation;
109 /** Handle of list or 0 if we want to create one */ 109 /** Handle of list or 0 if we want to create one */
110 uint32_t list_handle; 110 __u32 list_handle;
111 /** Number of BOs in list */ 111 /** Number of BOs in list */
112 uint32_t bo_number; 112 __u32 bo_number;
113 /** Size of each element describing BO */ 113 /** Size of each element describing BO */
114 uint32_t bo_info_size; 114 __u32 bo_info_size;
115 /** Pointer to array describing BOs */ 115 /** Pointer to array describing BOs */
116 uint64_t bo_info_ptr; 116 __u64 bo_info_ptr;
117}; 117};
118 118
119struct drm_amdgpu_bo_list_entry { 119struct drm_amdgpu_bo_list_entry {
120 /** Handle of BO */ 120 /** Handle of BO */
121 uint32_t bo_handle; 121 __u32 bo_handle;
122 /** New (if specified) BO priority to be used during migration */ 122 /** New (if specified) BO priority to be used during migration */
123 uint32_t bo_priority; 123 __u32 bo_priority;
124}; 124};
125 125
126struct drm_amdgpu_bo_list_out { 126struct drm_amdgpu_bo_list_out {
127 /** Handle of resource list */ 127 /** Handle of resource list */
128 uint32_t list_handle; 128 __u32 list_handle;
129 uint32_t _pad; 129 __u32 _pad;
130}; 130};
131 131
132union drm_amdgpu_bo_list { 132union drm_amdgpu_bo_list {
@@ -150,26 +150,26 @@ union drm_amdgpu_bo_list {
150 150
151struct drm_amdgpu_ctx_in { 151struct drm_amdgpu_ctx_in {
152 /** AMDGPU_CTX_OP_* */ 152 /** AMDGPU_CTX_OP_* */
153 uint32_t op; 153 __u32 op;
154 /** For future use, no flags defined so far */ 154 /** For future use, no flags defined so far */
155 uint32_t flags; 155 __u32 flags;
156 uint32_t ctx_id; 156 __u32 ctx_id;
157 uint32_t _pad; 157 __u32 _pad;
158}; 158};
159 159
160union drm_amdgpu_ctx_out { 160union drm_amdgpu_ctx_out {
161 struct { 161 struct {
162 uint32_t ctx_id; 162 __u32 ctx_id;
163 uint32_t _pad; 163 __u32 _pad;
164 } alloc; 164 } alloc;
165 165
166 struct { 166 struct {
167 /** For future use, no flags defined so far */ 167 /** For future use, no flags defined so far */
168 uint64_t flags; 168 __u64 flags;
169 /** Number of resets caused by this context so far. */ 169 /** Number of resets caused by this context so far. */
170 uint32_t hangs; 170 __u32 hangs;
171 /** Reset status since the last call of the ioctl. */ 171 /** Reset status since the last call of the ioctl. */
172 uint32_t reset_status; 172 __u32 reset_status;
173 } state; 173 } state;
174}; 174};
175 175
@@ -189,12 +189,12 @@ union drm_amdgpu_ctx {
189#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) 189#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
190 190
191struct drm_amdgpu_gem_userptr { 191struct drm_amdgpu_gem_userptr {
192 uint64_t addr; 192 __u64 addr;
193 uint64_t size; 193 __u64 size;
194 /* AMDGPU_GEM_USERPTR_* */ 194 /* AMDGPU_GEM_USERPTR_* */
195 uint32_t flags; 195 __u32 flags;
196 /* Resulting GEM handle */ 196 /* Resulting GEM handle */
197 uint32_t handle; 197 __u32 handle;
198}; 198};
199 199
200/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ 200/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
@@ -226,28 +226,28 @@ struct drm_amdgpu_gem_userptr {
226/** The same structure is shared for input/output */ 226/** The same structure is shared for input/output */
227struct drm_amdgpu_gem_metadata { 227struct drm_amdgpu_gem_metadata {
228 /** GEM Object handle */ 228 /** GEM Object handle */
229 uint32_t handle; 229 __u32 handle;
230 /** Do we want get or set metadata */ 230 /** Do we want get or set metadata */
231 uint32_t op; 231 __u32 op;
232 struct { 232 struct {
233 /** For future use, no flags defined so far */ 233 /** For future use, no flags defined so far */
234 uint64_t flags; 234 __u64 flags;
235 /** family specific tiling info */ 235 /** family specific tiling info */
236 uint64_t tiling_info; 236 __u64 tiling_info;
237 uint32_t data_size_bytes; 237 __u32 data_size_bytes;
238 uint32_t data[64]; 238 __u32 data[64];
239 } data; 239 } data;
240}; 240};
241 241
242struct drm_amdgpu_gem_mmap_in { 242struct drm_amdgpu_gem_mmap_in {
243 /** the GEM object handle */ 243 /** the GEM object handle */
244 uint32_t handle; 244 __u32 handle;
245 uint32_t _pad; 245 __u32 _pad;
246}; 246};
247 247
248struct drm_amdgpu_gem_mmap_out { 248struct drm_amdgpu_gem_mmap_out {
249 /** mmap offset from the vma offset manager */ 249 /** mmap offset from the vma offset manager */
250 uint64_t addr_ptr; 250 __u64 addr_ptr;
251}; 251};
252 252
253union drm_amdgpu_gem_mmap { 253union drm_amdgpu_gem_mmap {
@@ -257,18 +257,18 @@ union drm_amdgpu_gem_mmap {
257 257
258struct drm_amdgpu_gem_wait_idle_in { 258struct drm_amdgpu_gem_wait_idle_in {
259 /** GEM object handle */ 259 /** GEM object handle */
260 uint32_t handle; 260 __u32 handle;
261 /** For future use, no flags defined so far */ 261 /** For future use, no flags defined so far */
262 uint32_t flags; 262 __u32 flags;
263 /** Absolute timeout to wait */ 263 /** Absolute timeout to wait */
264 uint64_t timeout; 264 __u64 timeout;
265}; 265};
266 266
267struct drm_amdgpu_gem_wait_idle_out { 267struct drm_amdgpu_gem_wait_idle_out {
268 /** BO status: 0 - BO is idle, 1 - BO is busy */ 268 /** BO status: 0 - BO is idle, 1 - BO is busy */
269 uint32_t status; 269 __u32 status;
270 /** Returned current memory domain */ 270 /** Returned current memory domain */
271 uint32_t domain; 271 __u32 domain;
272}; 272};
273 273
274union drm_amdgpu_gem_wait_idle { 274union drm_amdgpu_gem_wait_idle {
@@ -278,18 +278,18 @@ union drm_amdgpu_gem_wait_idle {
278 278
279struct drm_amdgpu_wait_cs_in { 279struct drm_amdgpu_wait_cs_in {
280 /** Command submission handle */ 280 /** Command submission handle */
281 uint64_t handle; 281 __u64 handle;
282 /** Absolute timeout to wait */ 282 /** Absolute timeout to wait */
283 uint64_t timeout; 283 __u64 timeout;
284 uint32_t ip_type; 284 __u32 ip_type;
285 uint32_t ip_instance; 285 __u32 ip_instance;
286 uint32_t ring; 286 __u32 ring;
287 uint32_t ctx_id; 287 __u32 ctx_id;
288}; 288};
289 289
290struct drm_amdgpu_wait_cs_out { 290struct drm_amdgpu_wait_cs_out {
291 /** CS status: 0 - CS completed, 1 - CS still busy */ 291 /** CS status: 0 - CS completed, 1 - CS still busy */
292 uint64_t status; 292 __u64 status;
293}; 293};
294 294
295union drm_amdgpu_wait_cs { 295union drm_amdgpu_wait_cs {
@@ -303,11 +303,11 @@ union drm_amdgpu_wait_cs {
303/* Sets or returns a value associated with a buffer. */ 303/* Sets or returns a value associated with a buffer. */
304struct drm_amdgpu_gem_op { 304struct drm_amdgpu_gem_op {
305 /** GEM object handle */ 305 /** GEM object handle */
306 uint32_t handle; 306 __u32 handle;
307 /** AMDGPU_GEM_OP_* */ 307 /** AMDGPU_GEM_OP_* */
308 uint32_t op; 308 __u32 op;
309 /** Input or return value */ 309 /** Input or return value */
310 uint64_t value; 310 __u64 value;
311}; 311};
312 312
313#define AMDGPU_VA_OP_MAP 1 313#define AMDGPU_VA_OP_MAP 1
@@ -326,18 +326,18 @@ struct drm_amdgpu_gem_op {
326 326
327struct drm_amdgpu_gem_va { 327struct drm_amdgpu_gem_va {
328 /** GEM object handle */ 328 /** GEM object handle */
329 uint32_t handle; 329 __u32 handle;
330 uint32_t _pad; 330 __u32 _pad;
331 /** AMDGPU_VA_OP_* */ 331 /** AMDGPU_VA_OP_* */
332 uint32_t operation; 332 __u32 operation;
333 /** AMDGPU_VM_PAGE_* */ 333 /** AMDGPU_VM_PAGE_* */
334 uint32_t flags; 334 __u32 flags;
335 /** va address to assign . Must be correctly aligned.*/ 335 /** va address to assign . Must be correctly aligned.*/
336 uint64_t va_address; 336 __u64 va_address;
337 /** Specify offset inside of BO to assign. Must be correctly aligned.*/ 337 /** Specify offset inside of BO to assign. Must be correctly aligned.*/
338 uint64_t offset_in_bo; 338 __u64 offset_in_bo;
339 /** Specify mapping size. Must be correctly aligned. */ 339 /** Specify mapping size. Must be correctly aligned. */
340 uint64_t map_size; 340 __u64 map_size;
341}; 341};
342 342
343#define AMDGPU_HW_IP_GFX 0 343#define AMDGPU_HW_IP_GFX 0
@@ -354,24 +354,24 @@ struct drm_amdgpu_gem_va {
354#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 354#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
355 355
356struct drm_amdgpu_cs_chunk { 356struct drm_amdgpu_cs_chunk {
357 uint32_t chunk_id; 357 __u32 chunk_id;
358 uint32_t length_dw; 358 __u32 length_dw;
359 uint64_t chunk_data; 359 __u64 chunk_data;
360}; 360};
361 361
362struct drm_amdgpu_cs_in { 362struct drm_amdgpu_cs_in {
363 /** Rendering context id */ 363 /** Rendering context id */
364 uint32_t ctx_id; 364 __u32 ctx_id;
365 /** Handle of resource list associated with CS */ 365 /** Handle of resource list associated with CS */
366 uint32_t bo_list_handle; 366 __u32 bo_list_handle;
367 uint32_t num_chunks; 367 __u32 num_chunks;
368 uint32_t _pad; 368 __u32 _pad;
369 /** this points to uint64_t * which point to cs chunks */ 369 /** this points to __u64 * which point to cs chunks */
370 uint64_t chunks; 370 __u64 chunks;
371}; 371};
372 372
373struct drm_amdgpu_cs_out { 373struct drm_amdgpu_cs_out {
374 uint64_t handle; 374 __u64 handle;
375}; 375};
376 376
377union drm_amdgpu_cs { 377union drm_amdgpu_cs {
@@ -388,32 +388,32 @@ union drm_amdgpu_cs {
388#define AMDGPU_IB_FLAG_PREAMBLE (1<<1) 388#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
389 389
390struct drm_amdgpu_cs_chunk_ib { 390struct drm_amdgpu_cs_chunk_ib {
391 uint32_t _pad; 391 __u32 _pad;
392 /** AMDGPU_IB_FLAG_* */ 392 /** AMDGPU_IB_FLAG_* */
393 uint32_t flags; 393 __u32 flags;
394 /** Virtual address to begin IB execution */ 394 /** Virtual address to begin IB execution */
395 uint64_t va_start; 395 __u64 va_start;
396 /** Size of submission */ 396 /** Size of submission */
397 uint32_t ib_bytes; 397 __u32 ib_bytes;
398 /** HW IP to submit to */ 398 /** HW IP to submit to */
399 uint32_t ip_type; 399 __u32 ip_type;
400 /** HW IP index of the same type to submit to */ 400 /** HW IP index of the same type to submit to */
401 uint32_t ip_instance; 401 __u32 ip_instance;
402 /** Ring index to submit to */ 402 /** Ring index to submit to */
403 uint32_t ring; 403 __u32 ring;
404}; 404};
405 405
406struct drm_amdgpu_cs_chunk_dep { 406struct drm_amdgpu_cs_chunk_dep {
407 uint32_t ip_type; 407 __u32 ip_type;
408 uint32_t ip_instance; 408 __u32 ip_instance;
409 uint32_t ring; 409 __u32 ring;
410 uint32_t ctx_id; 410 __u32 ctx_id;
411 uint64_t handle; 411 __u64 handle;
412}; 412};
413 413
414struct drm_amdgpu_cs_chunk_fence { 414struct drm_amdgpu_cs_chunk_fence {
415 uint32_t handle; 415 __u32 handle;
416 uint32_t offset; 416 __u32 offset;
417}; 417};
418 418
419struct drm_amdgpu_cs_chunk_data { 419struct drm_amdgpu_cs_chunk_data {
@@ -486,83 +486,83 @@ struct drm_amdgpu_cs_chunk_data {
486/* Input structure for the INFO ioctl */ 486/* Input structure for the INFO ioctl */
487struct drm_amdgpu_info { 487struct drm_amdgpu_info {
488 /* Where the return value will be stored */ 488 /* Where the return value will be stored */
489 uint64_t return_pointer; 489 __u64 return_pointer;
490 /* The size of the return value. Just like "size" in "snprintf", 490 /* The size of the return value. Just like "size" in "snprintf",
491 * it limits how many bytes the kernel can write. */ 491 * it limits how many bytes the kernel can write. */
492 uint32_t return_size; 492 __u32 return_size;
493 /* The query request id. */ 493 /* The query request id. */
494 uint32_t query; 494 __u32 query;
495 495
496 union { 496 union {
497 struct { 497 struct {
498 uint32_t id; 498 __u32 id;
499 uint32_t _pad; 499 __u32 _pad;
500 } mode_crtc; 500 } mode_crtc;
501 501
502 struct { 502 struct {
503 /** AMDGPU_HW_IP_* */ 503 /** AMDGPU_HW_IP_* */
504 uint32_t type; 504 __u32 type;
505 /** 505 /**
506 * Index of the IP if there are more IPs of the same 506 * Index of the IP if there are more IPs of the same
507 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. 507 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
508 */ 508 */
509 uint32_t ip_instance; 509 __u32 ip_instance;
510 } query_hw_ip; 510 } query_hw_ip;
511 511
512 struct { 512 struct {
513 uint32_t dword_offset; 513 __u32 dword_offset;
514 /** number of registers to read */ 514 /** number of registers to read */
515 uint32_t count; 515 __u32 count;
516 uint32_t instance; 516 __u32 instance;
517 /** For future use, no flags defined so far */ 517 /** For future use, no flags defined so far */
518 uint32_t flags; 518 __u32 flags;
519 } read_mmr_reg; 519 } read_mmr_reg;
520 520
521 struct { 521 struct {
522 /** AMDGPU_INFO_FW_* */ 522 /** AMDGPU_INFO_FW_* */
523 uint32_t fw_type; 523 __u32 fw_type;
524 /** 524 /**
525 * Index of the IP if there are more IPs of 525 * Index of the IP if there are more IPs of
526 * the same type. 526 * the same type.
527 */ 527 */
528 uint32_t ip_instance; 528 __u32 ip_instance;
529 /** 529 /**
530 * Index of the engine. Whether this is used depends 530 * Index of the engine. Whether this is used depends
531 * on the firmware type. (e.g. MEC, SDMA) 531 * on the firmware type. (e.g. MEC, SDMA)
532 */ 532 */
533 uint32_t index; 533 __u32 index;
534 uint32_t _pad; 534 __u32 _pad;
535 } query_fw; 535 } query_fw;
536 }; 536 };
537}; 537};
538 538
539struct drm_amdgpu_info_gds { 539struct drm_amdgpu_info_gds {
540 /** GDS GFX partition size */ 540 /** GDS GFX partition size */
541 uint32_t gds_gfx_partition_size; 541 __u32 gds_gfx_partition_size;
542 /** GDS compute partition size */ 542 /** GDS compute partition size */
543 uint32_t compute_partition_size; 543 __u32 compute_partition_size;
544 /** total GDS memory size */ 544 /** total GDS memory size */
545 uint32_t gds_total_size; 545 __u32 gds_total_size;
546 /** GWS size per GFX partition */ 546 /** GWS size per GFX partition */
547 uint32_t gws_per_gfx_partition; 547 __u32 gws_per_gfx_partition;
548 /** GSW size per compute partition */ 548 /** GSW size per compute partition */
549 uint32_t gws_per_compute_partition; 549 __u32 gws_per_compute_partition;
550 /** OA size per GFX partition */ 550 /** OA size per GFX partition */
551 uint32_t oa_per_gfx_partition; 551 __u32 oa_per_gfx_partition;
552 /** OA size per compute partition */ 552 /** OA size per compute partition */
553 uint32_t oa_per_compute_partition; 553 __u32 oa_per_compute_partition;
554 uint32_t _pad; 554 __u32 _pad;
555}; 555};
556 556
557struct drm_amdgpu_info_vram_gtt { 557struct drm_amdgpu_info_vram_gtt {
558 uint64_t vram_size; 558 __u64 vram_size;
559 uint64_t vram_cpu_accessible_size; 559 __u64 vram_cpu_accessible_size;
560 uint64_t gtt_size; 560 __u64 gtt_size;
561}; 561};
562 562
563struct drm_amdgpu_info_firmware { 563struct drm_amdgpu_info_firmware {
564 uint32_t ver; 564 __u32 ver;
565 uint32_t feature; 565 __u32 feature;
566}; 566};
567 567
568#define AMDGPU_VRAM_TYPE_UNKNOWN 0 568#define AMDGPU_VRAM_TYPE_UNKNOWN 0
@@ -576,61 +576,61 @@ struct drm_amdgpu_info_firmware {
576 576
577struct drm_amdgpu_info_device { 577struct drm_amdgpu_info_device {
578 /** PCI Device ID */ 578 /** PCI Device ID */
579 uint32_t device_id; 579 __u32 device_id;
580 /** Internal chip revision: A0, A1, etc.) */ 580 /** Internal chip revision: A0, A1, etc.) */
581 uint32_t chip_rev; 581 __u32 chip_rev;
582 uint32_t external_rev; 582 __u32 external_rev;
583 /** Revision id in PCI Config space */ 583 /** Revision id in PCI Config space */
584 uint32_t pci_rev; 584 __u32 pci_rev;
585 uint32_t family; 585 __u32 family;
586 uint32_t num_shader_engines; 586 __u32 num_shader_engines;
587 uint32_t num_shader_arrays_per_engine; 587 __u32 num_shader_arrays_per_engine;
588 /* in KHz */ 588 /* in KHz */
589 uint32_t gpu_counter_freq; 589 __u32 gpu_counter_freq;
590 uint64_t max_engine_clock; 590 __u64 max_engine_clock;
591 uint64_t max_memory_clock; 591 __u64 max_memory_clock;
592 /* cu information */ 592 /* cu information */
593 uint32_t cu_active_number; 593 __u32 cu_active_number;
594 uint32_t cu_ao_mask; 594 __u32 cu_ao_mask;
595 uint32_t cu_bitmap[4][4]; 595 __u32 cu_bitmap[4][4];
596 /** Render backend pipe mask. One render backend is CB+DB. */ 596 /** Render backend pipe mask. One render backend is CB+DB. */
597 uint32_t enabled_rb_pipes_mask; 597 __u32 enabled_rb_pipes_mask;
598 uint32_t num_rb_pipes; 598 __u32 num_rb_pipes;
599 uint32_t num_hw_gfx_contexts; 599 __u32 num_hw_gfx_contexts;
600 uint32_t _pad; 600 __u32 _pad;
601 uint64_t ids_flags; 601 __u64 ids_flags;
602 /** Starting virtual address for UMDs. */ 602 /** Starting virtual address for UMDs. */
603 uint64_t virtual_address_offset; 603 __u64 virtual_address_offset;
604 /** The maximum virtual address */ 604 /** The maximum virtual address */
605 uint64_t virtual_address_max; 605 __u64 virtual_address_max;
606 /** Required alignment of virtual addresses. */ 606 /** Required alignment of virtual addresses. */
607 uint32_t virtual_address_alignment; 607 __u32 virtual_address_alignment;
608 /** Page table entry - fragment size */ 608 /** Page table entry - fragment size */
609 uint32_t pte_fragment_size; 609 __u32 pte_fragment_size;
610 uint32_t gart_page_size; 610 __u32 gart_page_size;
611 /** constant engine ram size*/ 611 /** constant engine ram size*/
612 uint32_t ce_ram_size; 612 __u32 ce_ram_size;
613 /** video memory type info*/ 613 /** video memory type info*/
614 uint32_t vram_type; 614 __u32 vram_type;
615 /** video memory bit width*/ 615 /** video memory bit width*/
616 uint32_t vram_bit_width; 616 __u32 vram_bit_width;
617 /* vce harvesting instance */ 617 /* vce harvesting instance */
618 uint32_t vce_harvest_config; 618 __u32 vce_harvest_config;
619}; 619};
620 620
621struct drm_amdgpu_info_hw_ip { 621struct drm_amdgpu_info_hw_ip {
622 /** Version of h/w IP */ 622 /** Version of h/w IP */
623 uint32_t hw_ip_version_major; 623 __u32 hw_ip_version_major;
624 uint32_t hw_ip_version_minor; 624 __u32 hw_ip_version_minor;
625 /** Capabilities */ 625 /** Capabilities */
626 uint64_t capabilities_flags; 626 __u64 capabilities_flags;
627 /** command buffer address start alignment*/ 627 /** command buffer address start alignment*/
628 uint32_t ib_start_alignment; 628 __u32 ib_start_alignment;
629 /** command buffer size alignment*/ 629 /** command buffer size alignment*/
630 uint32_t ib_size_alignment; 630 __u32 ib_size_alignment;
631 /** Bitmask of available rings. Bit 0 means ring 0, etc. */ 631 /** Bitmask of available rings. Bit 0 means ring 0, etc. */
632 uint32_t available_rings; 632 __u32 available_rings;
633 uint32_t _pad; 633 __u32 _pad;
634}; 634};
635 635
636/* 636/*
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
index 8dec3fdc99c7..6de7f0196ca0 100644
--- a/include/uapi/drm/armada_drm.h
+++ b/include/uapi/drm/armada_drm.h
@@ -9,6 +9,8 @@
9#ifndef DRM_ARMADA_IOCTL_H 9#ifndef DRM_ARMADA_IOCTL_H
10#define DRM_ARMADA_IOCTL_H 10#define DRM_ARMADA_IOCTL_H
11 11
12#include "drm.h"
13
12#define DRM_ARMADA_GEM_CREATE 0x00 14#define DRM_ARMADA_GEM_CREATE 0x00
13#define DRM_ARMADA_GEM_MMAP 0x02 15#define DRM_ARMADA_GEM_MMAP 0x02
14#define DRM_ARMADA_GEM_PWRITE 0x03 16#define DRM_ARMADA_GEM_PWRITE 0x03
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 3801584a0c53..b4e92eb12044 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -54,6 +54,7 @@ typedef int32_t __s32;
54typedef uint32_t __u32; 54typedef uint32_t __u32;
55typedef int64_t __s64; 55typedef int64_t __s64;
56typedef uint64_t __u64; 56typedef uint64_t __u64;
57typedef size_t __kernel_size_t;
57typedef unsigned long drm_handle_t; 58typedef unsigned long drm_handle_t;
58 59
59#endif 60#endif
@@ -129,11 +130,11 @@ struct drm_version {
129 int version_major; /**< Major version */ 130 int version_major; /**< Major version */
130 int version_minor; /**< Minor version */ 131 int version_minor; /**< Minor version */
131 int version_patchlevel; /**< Patch level */ 132 int version_patchlevel; /**< Patch level */
132 size_t name_len; /**< Length of name buffer */ 133 __kernel_size_t name_len; /**< Length of name buffer */
133 char __user *name; /**< Name of driver */ 134 char __user *name; /**< Name of driver */
134 size_t date_len; /**< Length of date buffer */ 135 __kernel_size_t date_len; /**< Length of date buffer */
135 char __user *date; /**< User-space buffer to hold date */ 136 char __user *date; /**< User-space buffer to hold date */
136 size_t desc_len; /**< Length of desc buffer */ 137 __kernel_size_t desc_len; /**< Length of desc buffer */
137 char __user *desc; /**< User-space buffer to hold desc */ 138 char __user *desc; /**< User-space buffer to hold desc */
138}; 139};
139 140
@@ -143,7 +144,7 @@ struct drm_version {
143 * \sa drmGetBusid() and drmSetBusId(). 144 * \sa drmGetBusid() and drmSetBusId().
144 */ 145 */
145struct drm_unique { 146struct drm_unique {
146 size_t unique_len; /**< Length of unique */ 147 __kernel_size_t unique_len; /**< Length of unique */
147 char __user *unique; /**< Unique name for driver instantiation */ 148 char __user *unique; /**< Unique name for driver instantiation */
148}; 149};
149 150
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0b69a7753558..998bd253faad 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -24,7 +24,7 @@
24#ifndef DRM_FOURCC_H 24#ifndef DRM_FOURCC_H
25#define DRM_FOURCC_H 25#define DRM_FOURCC_H
26 26
27#include <linux/types.h> 27#include "drm.h"
28 28
29#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ 29#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
30 ((__u32)(c) << 16) | ((__u32)(d) << 24)) 30 ((__u32)(c) << 16) | ((__u32)(d) << 24))
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 6c11ca401de8..50adb46204c2 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -27,7 +27,7 @@
27#ifndef _DRM_MODE_H 27#ifndef _DRM_MODE_H
28#define _DRM_MODE_H 28#define _DRM_MODE_H
29 29
30#include <linux/types.h> 30#include "drm.h"
31 31
32#define DRM_DISPLAY_INFO_LEN 32 32#define DRM_DISPLAY_INFO_LEN 32
33#define DRM_CONNECTOR_NAME_LEN 32 33#define DRM_CONNECTOR_NAME_LEN 32
@@ -526,14 +526,14 @@ struct drm_mode_crtc_page_flip {
526 526
527/* create a dumb scanout buffer */ 527/* create a dumb scanout buffer */
528struct drm_mode_create_dumb { 528struct drm_mode_create_dumb {
529 uint32_t height; 529 __u32 height;
530 uint32_t width; 530 __u32 width;
531 uint32_t bpp; 531 __u32 bpp;
532 uint32_t flags; 532 __u32 flags;
533 /* handle, pitch, size will be returned */ 533 /* handle, pitch, size will be returned */
534 uint32_t handle; 534 __u32 handle;
535 uint32_t pitch; 535 __u32 pitch;
536 uint64_t size; 536 __u64 size;
537}; 537};
538 538
539/* set up for mmap of a dumb scanout buffer */ 539/* set up for mmap of a dumb scanout buffer */
@@ -550,7 +550,7 @@ struct drm_mode_map_dumb {
550}; 550};
551 551
552struct drm_mode_destroy_dumb { 552struct drm_mode_destroy_dumb {
553 uint32_t handle; 553 __u32 handle;
554}; 554};
555 555
556/* page-flip flags are valid, plus: */ 556/* page-flip flags are valid, plus: */
diff --git a/include/uapi/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h
index 413a5642d49f..1d1a858a203d 100644
--- a/include/uapi/drm/drm_sarea.h
+++ b/include/uapi/drm/drm_sarea.h
@@ -32,7 +32,7 @@
32#ifndef _DRM_SAREA_H_ 32#ifndef _DRM_SAREA_H_
33#define _DRM_SAREA_H_ 33#define _DRM_SAREA_H_
34 34
35#include <drm/drm.h> 35#include "drm.h"
36 36
37/* SAREA area needs to be at least a page */ 37/* SAREA area needs to be at least a page */
38#if defined(__alpha__) 38#if defined(__alpha__)
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
new file mode 100644
index 000000000000..4cc989ad6851
--- /dev/null
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -0,0 +1,222 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ETNAVIV_DRM_H__
18#define __ETNAVIV_DRM_H__
19
20#include "drm.h"
21
22/* Please note that modifications to all structs defined here are
23 * subject to backwards-compatibility constraints:
24 * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
25 * user/kernel compatibility
26 * 2) Keep fields aligned to their size
27 * 3) Because of how drm_ioctl() works, we can add new fields at
28 * the end of an ioctl if some care is taken: drm_ioctl() will
29 * zero out the new fields at the tail of the ioctl, so a zero
30 * value should have a backwards compatible meaning. And for
31 * output params, userspace won't see the newly added output
32 * fields.. so that has to be somehow ok.
33 */
34
35/* timeouts are specified in clock-monotonic absolute times (to simplify
36 * restarting interrupted ioctls). The following struct is logically the
37 * same as 'struct timespec' but 32/64b ABI safe.
38 */
39struct drm_etnaviv_timespec {
40 __s64 tv_sec; /* seconds */
41 __s64 tv_nsec; /* nanoseconds */
42};
43
44#define ETNAVIV_PARAM_GPU_MODEL 0x01
45#define ETNAVIV_PARAM_GPU_REVISION 0x02
46#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
47#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
48#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
49#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
50#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
51
52#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
53#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
54#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
55#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
56#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
57#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
58#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
59#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
60#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
61#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
62
63#define ETNA_MAX_PIPES 4
64
65struct drm_etnaviv_param {
66 __u32 pipe; /* in */
67 __u32 param; /* in, ETNAVIV_PARAM_x */
68 __u64 value; /* out (get_param) or in (set_param) */
69};
70
71/*
72 * GEM buffers:
73 */
74
75#define ETNA_BO_CACHE_MASK 0x000f0000
76/* cache modes */
77#define ETNA_BO_CACHED 0x00010000
78#define ETNA_BO_WC 0x00020000
79#define ETNA_BO_UNCACHED 0x00040000
80/* map flags */
81#define ETNA_BO_FORCE_MMU 0x00100000
82
83struct drm_etnaviv_gem_new {
84 __u64 size; /* in */
85 __u32 flags; /* in, mask of ETNA_BO_x */
86 __u32 handle; /* out */
87};
88
89struct drm_etnaviv_gem_info {
90 __u32 handle; /* in */
91 __u32 pad;
92 __u64 offset; /* out, offset to pass to mmap() */
93};
94
95#define ETNA_PREP_READ 0x01
96#define ETNA_PREP_WRITE 0x02
97#define ETNA_PREP_NOSYNC 0x04
98
99struct drm_etnaviv_gem_cpu_prep {
100 __u32 handle; /* in */
101 __u32 op; /* in, mask of ETNA_PREP_x */
102 struct drm_etnaviv_timespec timeout; /* in */
103};
104
105struct drm_etnaviv_gem_cpu_fini {
106 __u32 handle; /* in */
107 __u32 flags; /* in, placeholder for now, no defined values */
108};
109
110/*
111 * Cmdstream Submission:
112 */
113
114/* The value written into the cmdstream is logically:
115 * relocbuf->gpuaddr + reloc_offset
116 *
117 * NOTE that reloc's must be sorted by order of increasing submit_offset,
118 * otherwise EINVAL.
119 */
120struct drm_etnaviv_gem_submit_reloc {
121 __u32 submit_offset; /* in, offset from submit_bo */
122 __u32 reloc_idx; /* in, index of reloc_bo buffer */
123 __u64 reloc_offset; /* in, offset from start of reloc_bo */
124 __u32 flags; /* in, placeholder for now, no defined values */
125};
126
127/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
128 * cmdstream buffer(s) themselves or reloc entries) has one (and only
129 * one) entry in the submit->bos[] table.
130 *
131 * As a optimization, the current buffer (gpu virtual address) can be
132 * passed back through the 'presumed' field. If on a subsequent reloc,
133 * userspace passes back a 'presumed' address that is still valid,
134 * then patching the cmdstream for this entry is skipped. This can
135 * avoid kernel needing to map/access the cmdstream bo in the common
136 * case.
137 */
138#define ETNA_SUBMIT_BO_READ 0x0001
139#define ETNA_SUBMIT_BO_WRITE 0x0002
140struct drm_etnaviv_gem_submit_bo {
141 __u32 flags; /* in, mask of ETNA_SUBMIT_BO_x */
142 __u32 handle; /* in, GEM handle */
143 __u64 presumed; /* in/out, presumed buffer address */
144};
145
146/* Each cmdstream submit consists of a table of buffers involved, and
147 * one or more cmdstream buffers. This allows for conditional execution
148 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
149 */
150#define ETNA_PIPE_3D 0x00
151#define ETNA_PIPE_2D 0x01
152#define ETNA_PIPE_VG 0x02
153struct drm_etnaviv_gem_submit {
154 __u32 fence; /* out */
155 __u32 pipe; /* in */
156 __u32 exec_state; /* in, initial execution state (ETNA_PIPE_x) */
157 __u32 nr_bos; /* in, number of submit_bo's */
158 __u32 nr_relocs; /* in, number of submit_reloc's */
159 __u32 stream_size; /* in, cmdstream size */
160 __u64 bos; /* in, ptr to array of submit_bo's */
161 __u64 relocs; /* in, ptr to array of submit_reloc's */
162 __u64 stream; /* in, ptr to cmdstream */
163};
164
165/* The normal way to synchronize with the GPU is just to CPU_PREP on
166 * a buffer if you need to access it from the CPU (other cmdstream
167 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
168 * handle the required synchronization under the hood). This ioctl
169 * mainly just exists as a way to implement the gallium pipe_fence
170 * APIs without requiring a dummy bo to synchronize on.
171 */
172#define ETNA_WAIT_NONBLOCK 0x01
173struct drm_etnaviv_wait_fence {
174 __u32 pipe; /* in */
175 __u32 fence; /* in */
176 __u32 flags; /* in, mask of ETNA_WAIT_x */
177 __u32 pad;
178 struct drm_etnaviv_timespec timeout; /* in */
179};
180
181#define ETNA_USERPTR_READ 0x01
182#define ETNA_USERPTR_WRITE 0x02
183struct drm_etnaviv_gem_userptr {
184 __u64 user_ptr; /* in, page aligned user pointer */
185 __u64 user_size; /* in, page aligned user size */
186 __u32 flags; /* in, flags */
187 __u32 handle; /* out, non-zero handle */
188};
189
190struct drm_etnaviv_gem_wait {
191 __u32 pipe; /* in */
192 __u32 handle; /* in, bo to be waited for */
193 __u32 flags; /* in, mask of ETNA_WAIT_x */
194 __u32 pad;
195 struct drm_etnaviv_timespec timeout; /* in */
196};
197
198#define DRM_ETNAVIV_GET_PARAM 0x00
199/* placeholder:
200#define DRM_ETNAVIV_SET_PARAM 0x01
201 */
202#define DRM_ETNAVIV_GEM_NEW 0x02
203#define DRM_ETNAVIV_GEM_INFO 0x03
204#define DRM_ETNAVIV_GEM_CPU_PREP 0x04
205#define DRM_ETNAVIV_GEM_CPU_FINI 0x05
206#define DRM_ETNAVIV_GEM_SUBMIT 0x06
207#define DRM_ETNAVIV_WAIT_FENCE 0x07
208#define DRM_ETNAVIV_GEM_USERPTR 0x08
209#define DRM_ETNAVIV_GEM_WAIT 0x09
210#define DRM_ETNAVIV_NUM_IOCTLS 0x0a
211
212#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
213#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
214#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
215#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
216#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
217#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
218#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
219#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
220#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
221
222#endif /* __ETNAVIV_DRM_H__ */
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 5575ed1598bd..312c67d744ae 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -15,7 +15,7 @@
15#ifndef _UAPI_EXYNOS_DRM_H_ 15#ifndef _UAPI_EXYNOS_DRM_H_
16#define _UAPI_EXYNOS_DRM_H_ 16#define _UAPI_EXYNOS_DRM_H_
17 17
18#include <drm/drm.h> 18#include "drm.h"
19 19
20/** 20/**
21 * User-desired buffer creation information structure. 21 * User-desired buffer creation information structure.
@@ -27,7 +27,7 @@
27 * - this handle will be set by gem module of kernel side. 27 * - this handle will be set by gem module of kernel side.
28 */ 28 */
29struct drm_exynos_gem_create { 29struct drm_exynos_gem_create {
30 uint64_t size; 30 __u64 size;
31 unsigned int flags; 31 unsigned int flags;
32 unsigned int handle; 32 unsigned int handle;
33}; 33};
@@ -44,7 +44,7 @@ struct drm_exynos_gem_create {
44struct drm_exynos_gem_info { 44struct drm_exynos_gem_info {
45 unsigned int handle; 45 unsigned int handle;
46 unsigned int flags; 46 unsigned int flags;
47 uint64_t size; 47 __u64 size;
48}; 48};
49 49
50/** 50/**
@@ -58,7 +58,7 @@ struct drm_exynos_gem_info {
58struct drm_exynos_vidi_connection { 58struct drm_exynos_vidi_connection {
59 unsigned int connection; 59 unsigned int connection;
60 unsigned int extensions; 60 unsigned int extensions;
61 uint64_t edid; 61 __u64 edid;
62}; 62};
63 63
64/* memory type definitions. */ 64/* memory type definitions. */
diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h
index 34736efd5824..bdb028723ded 100644
--- a/include/uapi/drm/i810_drm.h
+++ b/include/uapi/drm/i810_drm.h
@@ -1,7 +1,7 @@
1#ifndef _I810_DRM_H_ 1#ifndef _I810_DRM_H_
2#define _I810_DRM_H_ 2#define _I810_DRM_H_
3 3
4#include <drm/drm.h> 4#include "drm.h"
5 5
6/* WARNING: These defines must be the same as what the Xserver uses. 6/* WARNING: These defines must be the same as what the Xserver uses.
7 * if you change them, you must change the defines in the Xserver. 7 * if you change them, you must change the defines in the Xserver.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index d727b49f07ac..acf21026c78a 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -27,7 +27,7 @@
27#ifndef _UAPI_I915_DRM_H_ 27#ifndef _UAPI_I915_DRM_H_
28#define _UAPI_I915_DRM_H_ 28#define _UAPI_I915_DRM_H_
29 29
30#include <drm/drm.h> 30#include "drm.h"
31 31
32/* Please note that modifications to all structs defined here are 32/* Please note that modifications to all structs defined here are
33 * subject to backwards-compatibility constraints. 33 * subject to backwards-compatibility constraints.
diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
index 2375bfd6e5e9..fca817009e13 100644
--- a/include/uapi/drm/mga_drm.h
+++ b/include/uapi/drm/mga_drm.h
@@ -35,7 +35,7 @@
35#ifndef __MGA_DRM_H__ 35#ifndef __MGA_DRM_H__
36#define __MGA_DRM_H__ 36#define __MGA_DRM_H__
37 37
38#include <drm/drm.h> 38#include "drm.h"
39 39
40/* WARNING: If you change any of these defines, make sure to change the 40/* WARNING: If you change any of these defines, make sure to change the
41 * defines in the Xserver file (mga_sarea.h) 41 * defines in the Xserver file (mga_sarea.h)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 75a232b9a970..81e6e0d1d360 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -18,8 +18,7 @@
18#ifndef __MSM_DRM_H__ 18#ifndef __MSM_DRM_H__
19#define __MSM_DRM_H__ 19#define __MSM_DRM_H__
20 20
21#include <stddef.h> 21#include "drm.h"
22#include <drm/drm.h>
23 22
24/* Please note that modifications to all structs defined here are 23/* Please note that modifications to all structs defined here are
25 * subject to backwards-compatibility constraints: 24 * subject to backwards-compatibility constraints:
@@ -122,7 +121,7 @@ struct drm_msm_gem_cpu_fini {
122struct drm_msm_gem_submit_reloc { 121struct drm_msm_gem_submit_reloc {
123 __u32 submit_offset; /* in, offset from submit_bo */ 122 __u32 submit_offset; /* in, offset from submit_bo */
124 __u32 or; /* in, value OR'd with result */ 123 __u32 or; /* in, value OR'd with result */
125 __s32 shift; /* in, amount of left shift (can be negative) */ 124 __s32 shift; /* in, amount of left shift (can be negative) */
126 __u32 reloc_idx; /* in, index of reloc_bo buffer */ 125 __u32 reloc_idx; /* in, index of reloc_bo buffer */
127 __u64 reloc_offset; /* in, offset from start of reloc_bo */ 126 __u64 reloc_offset; /* in, offset from start of reloc_bo */
128}; 127};
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index fd594cc73cc0..500d82aecbe4 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -27,6 +27,8 @@
27 27
28#define DRM_NOUVEAU_EVENT_NVIF 0x80000000 28#define DRM_NOUVEAU_EVENT_NVIF 0x80000000
29 29
30#include <drm/drm.h>
31
30#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) 32#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
31#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) 33#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
32#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) 34#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
@@ -41,34 +43,34 @@
41#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 43#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
42 44
43struct drm_nouveau_gem_info { 45struct drm_nouveau_gem_info {
44 uint32_t handle; 46 __u32 handle;
45 uint32_t domain; 47 __u32 domain;
46 uint64_t size; 48 __u64 size;
47 uint64_t offset; 49 __u64 offset;
48 uint64_t map_handle; 50 __u64 map_handle;
49 uint32_t tile_mode; 51 __u32 tile_mode;
50 uint32_t tile_flags; 52 __u32 tile_flags;
51}; 53};
52 54
53struct drm_nouveau_gem_new { 55struct drm_nouveau_gem_new {
54 struct drm_nouveau_gem_info info; 56 struct drm_nouveau_gem_info info;
55 uint32_t channel_hint; 57 __u32 channel_hint;
56 uint32_t align; 58 __u32 align;
57}; 59};
58 60
59#define NOUVEAU_GEM_MAX_BUFFERS 1024 61#define NOUVEAU_GEM_MAX_BUFFERS 1024
60struct drm_nouveau_gem_pushbuf_bo_presumed { 62struct drm_nouveau_gem_pushbuf_bo_presumed {
61 uint32_t valid; 63 __u32 valid;
62 uint32_t domain; 64 __u32 domain;
63 uint64_t offset; 65 __u64 offset;
64}; 66};
65 67
66struct drm_nouveau_gem_pushbuf_bo { 68struct drm_nouveau_gem_pushbuf_bo {
67 uint64_t user_priv; 69 __u64 user_priv;
68 uint32_t handle; 70 __u32 handle;
69 uint32_t read_domains; 71 __u32 read_domains;
70 uint32_t write_domains; 72 __u32 write_domains;
71 uint32_t valid_domains; 73 __u32 valid_domains;
72 struct drm_nouveau_gem_pushbuf_bo_presumed presumed; 74 struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
73}; 75};
74 76
@@ -77,46 +79,46 @@ struct drm_nouveau_gem_pushbuf_bo {
77#define NOUVEAU_GEM_RELOC_OR (1 << 2) 79#define NOUVEAU_GEM_RELOC_OR (1 << 2)
78#define NOUVEAU_GEM_MAX_RELOCS 1024 80#define NOUVEAU_GEM_MAX_RELOCS 1024
79struct drm_nouveau_gem_pushbuf_reloc { 81struct drm_nouveau_gem_pushbuf_reloc {
80 uint32_t reloc_bo_index; 82 __u32 reloc_bo_index;
81 uint32_t reloc_bo_offset; 83 __u32 reloc_bo_offset;
82 uint32_t bo_index; 84 __u32 bo_index;
83 uint32_t flags; 85 __u32 flags;
84 uint32_t data; 86 __u32 data;
85 uint32_t vor; 87 __u32 vor;
86 uint32_t tor; 88 __u32 tor;
87}; 89};
88 90
89#define NOUVEAU_GEM_MAX_PUSH 512 91#define NOUVEAU_GEM_MAX_PUSH 512
90struct drm_nouveau_gem_pushbuf_push { 92struct drm_nouveau_gem_pushbuf_push {
91 uint32_t bo_index; 93 __u32 bo_index;
92 uint32_t pad; 94 __u32 pad;
93 uint64_t offset; 95 __u64 offset;
94 uint64_t length; 96 __u64 length;
95}; 97};
96 98
97struct drm_nouveau_gem_pushbuf { 99struct drm_nouveau_gem_pushbuf {
98 uint32_t channel; 100 __u32 channel;
99 uint32_t nr_buffers; 101 __u32 nr_buffers;
100 uint64_t buffers; 102 __u64 buffers;
101 uint32_t nr_relocs; 103 __u32 nr_relocs;
102 uint32_t nr_push; 104 __u32 nr_push;
103 uint64_t relocs; 105 __u64 relocs;
104 uint64_t push; 106 __u64 push;
105 uint32_t suffix0; 107 __u32 suffix0;
106 uint32_t suffix1; 108 __u32 suffix1;
107 uint64_t vram_available; 109 __u64 vram_available;
108 uint64_t gart_available; 110 __u64 gart_available;
109}; 111};
110 112
111#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 113#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
112#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 114#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
113struct drm_nouveau_gem_cpu_prep { 115struct drm_nouveau_gem_cpu_prep {
114 uint32_t handle; 116 __u32 handle;
115 uint32_t flags; 117 __u32 flags;
116}; 118};
117 119
118struct drm_nouveau_gem_cpu_fini { 120struct drm_nouveau_gem_cpu_fini {
119 uint32_t handle; 121 __u32 handle;
120}; 122};
121 123
122#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */ 124#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 1d0b1172664e..38a3bd847e15 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -20,7 +20,7 @@
20#ifndef __OMAP_DRM_H__ 20#ifndef __OMAP_DRM_H__
21#define __OMAP_DRM_H__ 21#define __OMAP_DRM_H__
22 22
23#include <drm/drm.h> 23#include "drm.h"
24 24
25/* Please note that modifications to all structs defined here are 25/* Please note that modifications to all structs defined here are
26 * subject to backwards-compatibility constraints. 26 * subject to backwards-compatibility constraints.
@@ -101,9 +101,6 @@ struct drm_omap_gem_info {
101 101
102#define DRM_OMAP_GET_PARAM 0x00 102#define DRM_OMAP_GET_PARAM 0x00
103#define DRM_OMAP_SET_PARAM 0x01 103#define DRM_OMAP_SET_PARAM 0x01
104/* placeholder for plugin-api
105#define DRM_OMAP_GET_BASE 0x02
106*/
107#define DRM_OMAP_GEM_NEW 0x03 104#define DRM_OMAP_GEM_NEW 0x03
108#define DRM_OMAP_GEM_CPU_PREP 0x04 105#define DRM_OMAP_GEM_CPU_PREP 0x04
109#define DRM_OMAP_GEM_CPU_FINI 0x05 106#define DRM_OMAP_GEM_CPU_FINI 0x05
@@ -112,9 +109,6 @@ struct drm_omap_gem_info {
112 109
113#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param) 110#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
114#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param) 111#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
115/* placeholder for plugin-api
116#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
117*/
118#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new) 112#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
119#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep) 113#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
120#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini) 114#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
index ebebd36c4117..4d1e32640463 100644
--- a/include/uapi/drm/qxl_drm.h
+++ b/include/uapi/drm/qxl_drm.h
@@ -24,13 +24,12 @@
24#ifndef QXL_DRM_H 24#ifndef QXL_DRM_H
25#define QXL_DRM_H 25#define QXL_DRM_H
26 26
27#include <stddef.h> 27#include "drm.h"
28#include "drm/drm.h"
29 28
30/* Please note that modifications to all structs defined here are 29/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 30 * subject to backwards-compatibility constraints.
32 * 31 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 32 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility Keep fields aligned to their size 33 * compatibility Keep fields aligned to their size
35 */ 34 */
36 35
@@ -48,14 +47,14 @@
48#define DRM_QXL_ALLOC_SURF 0x06 47#define DRM_QXL_ALLOC_SURF 0x06
49 48
50struct drm_qxl_alloc { 49struct drm_qxl_alloc {
51 uint32_t size; 50 __u32 size;
52 uint32_t handle; /* 0 is an invalid handle */ 51 __u32 handle; /* 0 is an invalid handle */
53}; 52};
54 53
55struct drm_qxl_map { 54struct drm_qxl_map {
56 uint64_t offset; /* use for mmap system call */ 55 __u64 offset; /* use for mmap system call */
57 uint32_t handle; 56 __u32 handle;
58 uint32_t pad; 57 __u32 pad;
59}; 58};
60 59
61/* 60/*
@@ -68,59 +67,59 @@ struct drm_qxl_map {
68#define QXL_RELOC_TYPE_SURF 2 67#define QXL_RELOC_TYPE_SURF 2
69 68
70struct drm_qxl_reloc { 69struct drm_qxl_reloc {
71 uint64_t src_offset; /* offset into src_handle or src buffer */ 70 __u64 src_offset; /* offset into src_handle or src buffer */
72 uint64_t dst_offset; /* offset in dest handle */ 71 __u64 dst_offset; /* offset in dest handle */
73 uint32_t src_handle; /* dest handle to compute address from */ 72 __u32 src_handle; /* dest handle to compute address from */
74 uint32_t dst_handle; /* 0 if to command buffer */ 73 __u32 dst_handle; /* 0 if to command buffer */
75 uint32_t reloc_type; 74 __u32 reloc_type;
76 uint32_t pad; 75 __u32 pad;
77}; 76};
78 77
79struct drm_qxl_command { 78struct drm_qxl_command {
80 uint64_t __user command; /* void* */ 79 __u64 __user command; /* void* */
81 uint64_t __user relocs; /* struct drm_qxl_reloc* */ 80 __u64 __user relocs; /* struct drm_qxl_reloc* */
82 uint32_t type; 81 __u32 type;
83 uint32_t command_size; 82 __u32 command_size;
84 uint32_t relocs_num; 83 __u32 relocs_num;
85 uint32_t pad; 84 __u32 pad;
86}; 85};
87 86
88/* XXX: call it drm_qxl_commands? */ 87/* XXX: call it drm_qxl_commands? */
89struct drm_qxl_execbuffer { 88struct drm_qxl_execbuffer {
90 uint32_t flags; /* for future use */ 89 __u32 flags; /* for future use */
91 uint32_t commands_num; 90 __u32 commands_num;
92 uint64_t __user commands; /* struct drm_qxl_command* */ 91 __u64 __user commands; /* struct drm_qxl_command* */
93}; 92};
94 93
95struct drm_qxl_update_area { 94struct drm_qxl_update_area {
96 uint32_t handle; 95 __u32 handle;
97 uint32_t top; 96 __u32 top;
98 uint32_t left; 97 __u32 left;
99 uint32_t bottom; 98 __u32 bottom;
100 uint32_t right; 99 __u32 right;
101 uint32_t pad; 100 __u32 pad;
102}; 101};
103 102
104#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ 103#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
105#define QXL_PARAM_MAX_RELOCS 2 104#define QXL_PARAM_MAX_RELOCS 2
106struct drm_qxl_getparam { 105struct drm_qxl_getparam {
107 uint64_t param; 106 __u64 param;
108 uint64_t value; 107 __u64 value;
109}; 108};
110 109
111/* these are one bit values */ 110/* these are one bit values */
112struct drm_qxl_clientcap { 111struct drm_qxl_clientcap {
113 uint32_t index; 112 __u32 index;
114 uint32_t pad; 113 __u32 pad;
115}; 114};
116 115
117struct drm_qxl_alloc_surf { 116struct drm_qxl_alloc_surf {
118 uint32_t format; 117 __u32 format;
119 uint32_t width; 118 __u32 width;
120 uint32_t height; 119 __u32 height;
121 int32_t stride; 120 __s32 stride;
122 uint32_t handle; 121 __u32 handle;
123 uint32_t pad; 122 __u32 pad;
124}; 123};
125 124
126#define DRM_IOCTL_QXL_ALLOC \ 125#define DRM_IOCTL_QXL_ALLOC \
diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h
index 76b0aa3e8210..7a44c6500a7e 100644
--- a/include/uapi/drm/r128_drm.h
+++ b/include/uapi/drm/r128_drm.h
@@ -33,7 +33,7 @@
33#ifndef __R128_DRM_H__ 33#ifndef __R128_DRM_H__
34#define __R128_DRM_H__ 34#define __R128_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (r128_sarea.h) 39 * defines in the X server file (r128_sarea.h)
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 01aa2a8e3f8d..ccb9bcd82685 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -793,9 +793,9 @@ typedef struct drm_radeon_surface_free {
793#define RADEON_GEM_DOMAIN_VRAM 0x4 793#define RADEON_GEM_DOMAIN_VRAM 0x4
794 794
795struct drm_radeon_gem_info { 795struct drm_radeon_gem_info {
796 uint64_t gart_size; 796 __u64 gart_size;
797 uint64_t vram_size; 797 __u64 vram_size;
798 uint64_t vram_visible; 798 __u64 vram_visible;
799}; 799};
800 800
801#define RADEON_GEM_NO_BACKING_STORE (1 << 0) 801#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
@@ -807,11 +807,11 @@ struct drm_radeon_gem_info {
807#define RADEON_GEM_NO_CPU_ACCESS (1 << 4) 807#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
808 808
809struct drm_radeon_gem_create { 809struct drm_radeon_gem_create {
810 uint64_t size; 810 __u64 size;
811 uint64_t alignment; 811 __u64 alignment;
812 uint32_t handle; 812 __u32 handle;
813 uint32_t initial_domain; 813 __u32 initial_domain;
814 uint32_t flags; 814 __u32 flags;
815}; 815};
816 816
817/* 817/*
@@ -825,10 +825,10 @@ struct drm_radeon_gem_create {
825#define RADEON_GEM_USERPTR_REGISTER (1 << 3) 825#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
826 826
827struct drm_radeon_gem_userptr { 827struct drm_radeon_gem_userptr {
828 uint64_t addr; 828 __u64 addr;
829 uint64_t size; 829 __u64 size;
830 uint32_t flags; 830 __u32 flags;
831 uint32_t handle; 831 __u32 handle;
832}; 832};
833 833
834#define RADEON_TILING_MACRO 0x1 834#define RADEON_TILING_MACRO 0x1
@@ -850,72 +850,72 @@ struct drm_radeon_gem_userptr {
850#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf 850#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
851 851
852struct drm_radeon_gem_set_tiling { 852struct drm_radeon_gem_set_tiling {
853 uint32_t handle; 853 __u32 handle;
854 uint32_t tiling_flags; 854 __u32 tiling_flags;
855 uint32_t pitch; 855 __u32 pitch;
856}; 856};
857 857
858struct drm_radeon_gem_get_tiling { 858struct drm_radeon_gem_get_tiling {
859 uint32_t handle; 859 __u32 handle;
860 uint32_t tiling_flags; 860 __u32 tiling_flags;
861 uint32_t pitch; 861 __u32 pitch;
862}; 862};
863 863
864struct drm_radeon_gem_mmap { 864struct drm_radeon_gem_mmap {
865 uint32_t handle; 865 __u32 handle;
866 uint32_t pad; 866 __u32 pad;
867 uint64_t offset; 867 __u64 offset;
868 uint64_t size; 868 __u64 size;
869 uint64_t addr_ptr; 869 __u64 addr_ptr;
870}; 870};
871 871
872struct drm_radeon_gem_set_domain { 872struct drm_radeon_gem_set_domain {
873 uint32_t handle; 873 __u32 handle;
874 uint32_t read_domains; 874 __u32 read_domains;
875 uint32_t write_domain; 875 __u32 write_domain;
876}; 876};
877 877
878struct drm_radeon_gem_wait_idle { 878struct drm_radeon_gem_wait_idle {
879 uint32_t handle; 879 __u32 handle;
880 uint32_t pad; 880 __u32 pad;
881}; 881};
882 882
883struct drm_radeon_gem_busy { 883struct drm_radeon_gem_busy {
884 uint32_t handle; 884 __u32 handle;
885 uint32_t domain; 885 __u32 domain;
886}; 886};
887 887
888struct drm_radeon_gem_pread { 888struct drm_radeon_gem_pread {
889 /** Handle for the object being read. */ 889 /** Handle for the object being read. */
890 uint32_t handle; 890 __u32 handle;
891 uint32_t pad; 891 __u32 pad;
892 /** Offset into the object to read from */ 892 /** Offset into the object to read from */
893 uint64_t offset; 893 __u64 offset;
894 /** Length of data to read */ 894 /** Length of data to read */
895 uint64_t size; 895 __u64 size;
896 /** Pointer to write the data into. */ 896 /** Pointer to write the data into. */
897 /* void *, but pointers are not 32/64 compatible */ 897 /* void *, but pointers are not 32/64 compatible */
898 uint64_t data_ptr; 898 __u64 data_ptr;
899}; 899};
900 900
901struct drm_radeon_gem_pwrite { 901struct drm_radeon_gem_pwrite {
902 /** Handle for the object being written to. */ 902 /** Handle for the object being written to. */
903 uint32_t handle; 903 __u32 handle;
904 uint32_t pad; 904 __u32 pad;
905 /** Offset into the object to write to */ 905 /** Offset into the object to write to */
906 uint64_t offset; 906 __u64 offset;
907 /** Length of data to write */ 907 /** Length of data to write */
908 uint64_t size; 908 __u64 size;
909 /** Pointer to read the data from. */ 909 /** Pointer to read the data from. */
910 /* void *, but pointers are not 32/64 compatible */ 910 /* void *, but pointers are not 32/64 compatible */
911 uint64_t data_ptr; 911 __u64 data_ptr;
912}; 912};
913 913
914/* Sets or returns a value associated with a buffer. */ 914/* Sets or returns a value associated with a buffer. */
915struct drm_radeon_gem_op { 915struct drm_radeon_gem_op {
916 uint32_t handle; /* buffer */ 916 __u32 handle; /* buffer */
917 uint32_t op; /* RADEON_GEM_OP_* */ 917 __u32 op; /* RADEON_GEM_OP_* */
918 uint64_t value; /* input or return value */ 918 __u64 value; /* input or return value */
919}; 919};
920 920
921#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 921#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
@@ -935,11 +935,11 @@ struct drm_radeon_gem_op {
935#define RADEON_VM_PAGE_SNOOPED (1 << 4) 935#define RADEON_VM_PAGE_SNOOPED (1 << 4)
936 936
937struct drm_radeon_gem_va { 937struct drm_radeon_gem_va {
938 uint32_t handle; 938 __u32 handle;
939 uint32_t operation; 939 __u32 operation;
940 uint32_t vm_id; 940 __u32 vm_id;
941 uint32_t flags; 941 __u32 flags;
942 uint64_t offset; 942 __u64 offset;
943}; 943};
944 944
945#define RADEON_CHUNK_ID_RELOCS 0x01 945#define RADEON_CHUNK_ID_RELOCS 0x01
@@ -961,29 +961,29 @@ struct drm_radeon_gem_va {
961/* 0 = normal, + = higher priority, - = lower priority */ 961/* 0 = normal, + = higher priority, - = lower priority */
962 962
963struct drm_radeon_cs_chunk { 963struct drm_radeon_cs_chunk {
964 uint32_t chunk_id; 964 __u32 chunk_id;
965 uint32_t length_dw; 965 __u32 length_dw;
966 uint64_t chunk_data; 966 __u64 chunk_data;
967}; 967};
968 968
969/* drm_radeon_cs_reloc.flags */ 969/* drm_radeon_cs_reloc.flags */
970#define RADEON_RELOC_PRIO_MASK (0xf << 0) 970#define RADEON_RELOC_PRIO_MASK (0xf << 0)
971 971
972struct drm_radeon_cs_reloc { 972struct drm_radeon_cs_reloc {
973 uint32_t handle; 973 __u32 handle;
974 uint32_t read_domains; 974 __u32 read_domains;
975 uint32_t write_domain; 975 __u32 write_domain;
976 uint32_t flags; 976 __u32 flags;
977}; 977};
978 978
979struct drm_radeon_cs { 979struct drm_radeon_cs {
980 uint32_t num_chunks; 980 __u32 num_chunks;
981 uint32_t cs_id; 981 __u32 cs_id;
982 /* this points to uint64_t * which point to cs chunks */ 982 /* this points to __u64 * which point to cs chunks */
983 uint64_t chunks; 983 __u64 chunks;
984 /* updates to the limits after this CS ioctl */ 984 /* updates to the limits after this CS ioctl */
985 uint64_t gart_limit; 985 __u64 gart_limit;
986 uint64_t vram_limit; 986 __u64 vram_limit;
987}; 987};
988 988
989#define RADEON_INFO_DEVICE_ID 0x00 989#define RADEON_INFO_DEVICE_ID 0x00
@@ -1042,9 +1042,9 @@ struct drm_radeon_cs {
1042#define RADEON_INFO_GPU_RESET_COUNTER 0x26 1042#define RADEON_INFO_GPU_RESET_COUNTER 0x26
1043 1043
1044struct drm_radeon_info { 1044struct drm_radeon_info {
1045 uint32_t request; 1045 __u32 request;
1046 uint32_t pad; 1046 __u32 pad;
1047 uint64_t value; 1047 __u64 value;
1048}; 1048};
1049 1049
1050/* Those correspond to the tile index to use, this is to explicitly state 1050/* Those correspond to the tile index to use, this is to explicitly state
diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h
index 9dc9dc1a7753..574147489c60 100644
--- a/include/uapi/drm/savage_drm.h
+++ b/include/uapi/drm/savage_drm.h
@@ -26,7 +26,7 @@
26#ifndef __SAVAGE_DRM_H__ 26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__ 27#define __SAVAGE_DRM_H__
28 28
29#include <drm/drm.h> 29#include "drm.h"
30 30
31#ifndef __SAVAGE_SAREA_DEFINES__ 31#ifndef __SAVAGE_SAREA_DEFINES__
32#define __SAVAGE_SAREA_DEFINES__ 32#define __SAVAGE_SAREA_DEFINES__
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 5391780c2b05..27d0b054aed0 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -23,7 +23,7 @@
23#ifndef _UAPI_TEGRA_DRM_H_ 23#ifndef _UAPI_TEGRA_DRM_H_
24#define _UAPI_TEGRA_DRM_H_ 24#define _UAPI_TEGRA_DRM_H_
25 25
26#include <drm/drm.h> 26#include "drm.h"
27 27
28#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) 28#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
29#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) 29#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
new file mode 100644
index 000000000000..eeb37e394f13
--- /dev/null
+++ b/include/uapi/drm/vc4_drm.h
@@ -0,0 +1,279 @@
1/*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef _UAPI_VC4_DRM_H_
25#define _UAPI_VC4_DRM_H_
26
27#include "drm.h"
28
29#define DRM_VC4_SUBMIT_CL 0x00
30#define DRM_VC4_WAIT_SEQNO 0x01
31#define DRM_VC4_WAIT_BO 0x02
32#define DRM_VC4_CREATE_BO 0x03
33#define DRM_VC4_MMAP_BO 0x04
34#define DRM_VC4_CREATE_SHADER_BO 0x05
35#define DRM_VC4_GET_HANG_STATE 0x06
36
37#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
38#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
39#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
40#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
41#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
42#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
43#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
44
45struct drm_vc4_submit_rcl_surface {
46 __u32 hindex; /* Handle index, or ~0 if not present. */
47 __u32 offset; /* Offset to start of buffer. */
48 /*
49 * Bits for either render config (color_write) or load/store packet.
50 * Bits should all be 0 for MSAA load/stores.
51 */
52 __u16 bits;
53
54#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
55 __u16 flags;
56};
57
58/**
59 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
60 * engine.
61 *
62 * Drivers typically use GPU BOs to store batchbuffers / command lists and
63 * their associated state. However, because the VC4 lacks an MMU, we have to
64 * do validation of memory accesses by the GPU commands. If we were to store
65 * our commands in BOs, we'd need to do uncached readback from them to do the
66 * validation process, which is too expensive. Instead, userspace accumulates
67 * commands and associated state in plain memory, then the kernel copies the
68 * data to its own address space, and then validates and stores it in a GPU
69 * BO.
70 */
71struct drm_vc4_submit_cl {
72 /* Pointer to the binner command list.
73 *
74 * This is the first set of commands executed, which runs the
75 * coordinate shader to determine where primitives land on the screen,
76 * then writes out the state updates and draw calls necessary per tile
77 * to the tile allocation BO.
78 */
79 __u64 bin_cl;
80
81 /* Pointer to the shader records.
82 *
83 * Shader records are the structures read by the hardware that contain
84 * pointers to uniforms, shaders, and vertex attributes. The
85 * reference to the shader record has enough information to determine
86 * how many pointers are necessary (fixed number for shaders/uniforms,
87 * and an attribute count), so those BO indices into bo_handles are
88 * just stored as __u32s before each shader record passed in.
89 */
90 __u64 shader_rec;
91
92 /* Pointer to uniform data and texture handles for the textures
93 * referenced by the shader.
94 *
95 * For each shader state record, there is a set of uniform data in the
96 * order referenced by the record (FS, VS, then CS). Each set of
97 * uniform data has a __u32 index into bo_handles per texture
98 * sample operation, in the order the QPU_W_TMUn_S writes appear in
99 * the program. Following the texture BO handle indices is the actual
100 * uniform data.
101 *
102 * The individual uniform state blocks don't have sizes passed in,
103 * because the kernel has to determine the sizes anyway during shader
104 * code validation.
105 */
106 __u64 uniforms;
107 __u64 bo_handles;
108
109 /* Size in bytes of the binner command list. */
110 __u32 bin_cl_size;
111 /* Size in bytes of the set of shader records. */
112 __u32 shader_rec_size;
113 /* Number of shader records.
114 *
115 * This could just be computed from the contents of shader_records and
116 * the address bits of references to them from the bin CL, but it
117 * keeps the kernel from having to resize some allocations it makes.
118 */
119 __u32 shader_rec_count;
120 /* Size in bytes of the uniform state. */
121 __u32 uniforms_size;
122
123 /* Number of BO handles passed in (size is that times 4). */
124 __u32 bo_handle_count;
125
126 /* RCL setup: */
127 __u16 width;
128 __u16 height;
129 __u8 min_x_tile;
130 __u8 min_y_tile;
131 __u8 max_x_tile;
132 __u8 max_y_tile;
133 struct drm_vc4_submit_rcl_surface color_read;
134 struct drm_vc4_submit_rcl_surface color_write;
135 struct drm_vc4_submit_rcl_surface zs_read;
136 struct drm_vc4_submit_rcl_surface zs_write;
137 struct drm_vc4_submit_rcl_surface msaa_color_write;
138 struct drm_vc4_submit_rcl_surface msaa_zs_write;
139 __u32 clear_color[2];
140 __u32 clear_z;
141 __u8 clear_s;
142
143 __u32 pad:24;
144
145#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
146 __u32 flags;
147
148 /* Returned value of the seqno of this render job (for the
149 * wait ioctl).
150 */
151 __u64 seqno;
152};
153
154/**
155 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
156 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
157 *
158 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
159 * block, just return the status."
160 */
161struct drm_vc4_wait_seqno {
162 __u64 seqno;
163 __u64 timeout_ns;
164};
165
166/**
167 * struct drm_vc4_wait_bo - ioctl argument for waiting for
168 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
169 *
170 * This is useful for cases where multiple processes might be
171 * rendering to a BO and you want to wait for all rendering to be
172 * completed.
173 */
174struct drm_vc4_wait_bo {
175 __u32 handle;
176 __u32 pad;
177 __u64 timeout_ns;
178};
179
180/**
181 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
182 *
183 * There are currently no values for the flags argument, but it may be
184 * used in a future extension.
185 */
186struct drm_vc4_create_bo {
187 __u32 size;
188 __u32 flags;
189 /** Returned GEM handle for the BO. */
190 __u32 handle;
191 __u32 pad;
192};
193
194/**
195 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
196 *
197 * This doesn't actually perform an mmap. Instead, it returns the
198 * offset you need to use in an mmap on the DRM device node. This
199 * means that tools like valgrind end up knowing about the mapped
200 * memory.
201 *
202 * There are currently no values for the flags argument, but it may be
203 * used in a future extension.
204 */
205struct drm_vc4_mmap_bo {
206 /** Handle for the object being mapped. */
207 __u32 handle;
208 __u32 flags;
209 /** offset into the drm node to use for subsequent mmap call. */
210 __u64 offset;
211};
212
213/**
214 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
215 * shader BOs.
216 *
217 * Since allowing a shader to be overwritten while it's also being
218 * executed from would allow privlege escalation, shaders must be
219 * created using this ioctl, and they can't be mmapped later.
220 */
221struct drm_vc4_create_shader_bo {
222 /* Size of the data argument. */
223 __u32 size;
224 /* Flags, currently must be 0. */
225 __u32 flags;
226
227 /* Pointer to the data. */
228 __u64 data;
229
230 /** Returned GEM handle for the BO. */
231 __u32 handle;
232 /* Pad, must be 0. */
233 __u32 pad;
234};
235
236struct drm_vc4_get_hang_state_bo {
237 __u32 handle;
238 __u32 paddr;
239 __u32 size;
240 __u32 pad;
241};
242
243/**
244 * struct drm_vc4_hang_state - ioctl argument for collecting state
245 * from a GPU hang for analysis.
246*/
247struct drm_vc4_get_hang_state {
248 /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
249 __u64 bo;
250 /**
251 * On input, the size of the bo array. Output is the number
252 * of bos to be returned.
253 */
254 __u32 bo_count;
255
256 __u32 start_bin, start_render;
257
258 __u32 ct0ca, ct0ea;
259 __u32 ct1ca, ct1ea;
260 __u32 ct0cs, ct1cs;
261 __u32 ct0ra0, ct1ra0;
262
263 __u32 bpca, bpcs;
264 __u32 bpoa, bpos;
265
266 __u32 vpmbase;
267
268 __u32 dbge;
269 __u32 fdbgo;
270 __u32 fdbgb;
271 __u32 fdbgr;
272 __u32 fdbgs;
273 __u32 errstat;
274
275 /* Pad that we may save more registers into in the future. */
276 __u32 pad[16];
277};
278
279#endif /* _UAPI_VC4_DRM_H_ */
diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h
index 45bc80c3714b..fa21ed185520 100644
--- a/include/uapi/drm/via_drm.h
+++ b/include/uapi/drm/via_drm.h
@@ -24,7 +24,7 @@
24#ifndef _VIA_DRM_H_ 24#ifndef _VIA_DRM_H_
25#define _VIA_DRM_H_ 25#define _VIA_DRM_H_
26 26
27#include <drm/drm.h> 27#include "drm.h"
28 28
29/* WARNING: These defines must be the same as what the Xserver uses. 29/* WARNING: These defines must be the same as what the Xserver uses.
30 * if you change them, you must change the defines in the Xserver. 30 * if you change them, you must change the defines in the Xserver.
@@ -33,9 +33,6 @@
33#ifndef _VIA_DEFINES_ 33#ifndef _VIA_DEFINES_
34#define _VIA_DEFINES_ 34#define _VIA_DEFINES_
35 35
36#ifndef __KERNEL__
37#include "via_drmclient.h"
38#endif
39 36
40#define VIA_NR_SAREA_CLIPRECTS 8 37#define VIA_NR_SAREA_CLIPRECTS 8
41#define VIA_NR_XVMC_PORTS 10 38#define VIA_NR_XVMC_PORTS 10
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index fc9e2d6e5e2f..c74f1f90cb37 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -24,13 +24,12 @@
24#ifndef VIRTGPU_DRM_H 24#ifndef VIRTGPU_DRM_H
25#define VIRTGPU_DRM_H 25#define VIRTGPU_DRM_H
26 26
27#include <stddef.h> 27#include "drm.h"
28#include "drm/drm.h"
29 28
30/* Please note that modifications to all structs defined here are 29/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 30 * subject to backwards-compatibility constraints.
32 * 31 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 32 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility Keep fields aligned to their size 33 * compatibility Keep fields aligned to their size
35 */ 34 */
36 35
@@ -45,88 +44,88 @@
45#define DRM_VIRTGPU_GET_CAPS 0x09 44#define DRM_VIRTGPU_GET_CAPS 0x09
46 45
47struct drm_virtgpu_map { 46struct drm_virtgpu_map {
48 uint64_t offset; /* use for mmap system call */ 47 __u64 offset; /* use for mmap system call */
49 uint32_t handle; 48 __u32 handle;
50 uint32_t pad; 49 __u32 pad;
51}; 50};
52 51
53struct drm_virtgpu_execbuffer { 52struct drm_virtgpu_execbuffer {
54 uint32_t flags; /* for future use */ 53 __u32 flags; /* for future use */
55 uint32_t size; 54 __u32 size;
56 uint64_t command; /* void* */ 55 __u64 command; /* void* */
57 uint64_t bo_handles; 56 __u64 bo_handles;
58 uint32_t num_bo_handles; 57 __u32 num_bo_handles;
59 uint32_t pad; 58 __u32 pad;
60}; 59};
61 60
62#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 61#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
63 62
64struct drm_virtgpu_getparam { 63struct drm_virtgpu_getparam {
65 uint64_t param; 64 __u64 param;
66 uint64_t value; 65 __u64 value;
67}; 66};
68 67
69/* NO_BO flags? NO resource flag? */ 68/* NO_BO flags? NO resource flag? */
70/* resource flag for y_0_top */ 69/* resource flag for y_0_top */
71struct drm_virtgpu_resource_create { 70struct drm_virtgpu_resource_create {
72 uint32_t target; 71 __u32 target;
73 uint32_t format; 72 __u32 format;
74 uint32_t bind; 73 __u32 bind;
75 uint32_t width; 74 __u32 width;
76 uint32_t height; 75 __u32 height;
77 uint32_t depth; 76 __u32 depth;
78 uint32_t array_size; 77 __u32 array_size;
79 uint32_t last_level; 78 __u32 last_level;
80 uint32_t nr_samples; 79 __u32 nr_samples;
81 uint32_t flags; 80 __u32 flags;
82 uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ 81 __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
83 uint32_t res_handle; /* returned by kernel */ 82 __u32 res_handle; /* returned by kernel */
84 uint32_t size; /* validate transfer in the host */ 83 __u32 size; /* validate transfer in the host */
85 uint32_t stride; /* validate transfer in the host */ 84 __u32 stride; /* validate transfer in the host */
86}; 85};
87 86
88struct drm_virtgpu_resource_info { 87struct drm_virtgpu_resource_info {
89 uint32_t bo_handle; 88 __u32 bo_handle;
90 uint32_t res_handle; 89 __u32 res_handle;
91 uint32_t size; 90 __u32 size;
92 uint32_t stride; 91 __u32 stride;
93}; 92};
94 93
95struct drm_virtgpu_3d_box { 94struct drm_virtgpu_3d_box {
96 uint32_t x; 95 __u32 x;
97 uint32_t y; 96 __u32 y;
98 uint32_t z; 97 __u32 z;
99 uint32_t w; 98 __u32 w;
100 uint32_t h; 99 __u32 h;
101 uint32_t d; 100 __u32 d;
102}; 101};
103 102
104struct drm_virtgpu_3d_transfer_to_host { 103struct drm_virtgpu_3d_transfer_to_host {
105 uint32_t bo_handle; 104 __u32 bo_handle;
106 struct drm_virtgpu_3d_box box; 105 struct drm_virtgpu_3d_box box;
107 uint32_t level; 106 __u32 level;
108 uint32_t offset; 107 __u32 offset;
109}; 108};
110 109
111struct drm_virtgpu_3d_transfer_from_host { 110struct drm_virtgpu_3d_transfer_from_host {
112 uint32_t bo_handle; 111 __u32 bo_handle;
113 struct drm_virtgpu_3d_box box; 112 struct drm_virtgpu_3d_box box;
114 uint32_t level; 113 __u32 level;
115 uint32_t offset; 114 __u32 offset;
116}; 115};
117 116
118#define VIRTGPU_WAIT_NOWAIT 1 /* like it */ 117#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
119struct drm_virtgpu_3d_wait { 118struct drm_virtgpu_3d_wait {
120 uint32_t handle; /* 0 is an invalid handle */ 119 __u32 handle; /* 0 is an invalid handle */
121 uint32_t flags; 120 __u32 flags;
122}; 121};
123 122
124struct drm_virtgpu_get_caps { 123struct drm_virtgpu_get_caps {
125 uint32_t cap_set_id; 124 __u32 cap_set_id;
126 uint32_t cap_set_ver; 125 __u32 cap_set_ver;
127 uint64_t addr; 126 __u64 addr;
128 uint32_t size; 127 __u32 size;
129 uint32_t pad; 128 __u32 pad;
130}; 129};
131 130
132#define DRM_IOCTL_VIRTGPU_MAP \ 131#define DRM_IOCTL_VIRTGPU_MAP \
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 05b204954d16..5b68b4d10884 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -28,9 +28,7 @@
28#ifndef __VMWGFX_DRM_H__ 28#ifndef __VMWGFX_DRM_H__
29#define __VMWGFX_DRM_H__ 29#define __VMWGFX_DRM_H__
30 30
31#ifndef __KERNEL__ 31#include "drm.h"
32#include <drm/drm.h>
33#endif
34 32
35#define DRM_VMW_MAX_SURFACE_FACES 6 33#define DRM_VMW_MAX_SURFACE_FACES 6
36#define DRM_VMW_MAX_MIP_LEVELS 24 34#define DRM_VMW_MAX_MIP_LEVELS 24
@@ -111,9 +109,9 @@ enum drm_vmw_handle_type {
111 */ 109 */
112 110
113struct drm_vmw_getparam_arg { 111struct drm_vmw_getparam_arg {
114 uint64_t value; 112 __u64 value;
115 uint32_t param; 113 __u32 param;
116 uint32_t pad64; 114 __u32 pad64;
117}; 115};
118 116
119/*************************************************************************/ 117/*************************************************************************/
@@ -134,8 +132,8 @@ struct drm_vmw_getparam_arg {
134 */ 132 */
135 133
136struct drm_vmw_context_arg { 134struct drm_vmw_context_arg {
137 int32_t cid; 135 __s32 cid;
138 uint32_t pad64; 136 __u32 pad64;
139}; 137};
140 138
141/*************************************************************************/ 139/*************************************************************************/
@@ -165,7 +163,7 @@ struct drm_vmw_context_arg {
165 * @mip_levels: Number of mip levels for each face. 163 * @mip_levels: Number of mip levels for each face.
166 * An unused face should have 0 encoded. 164 * An unused face should have 0 encoded.
167 * @size_addr: Address of a user-space array of sruct drm_vmw_size 165 * @size_addr: Address of a user-space array of sruct drm_vmw_size
168 * cast to an uint64_t for 32-64 bit compatibility. 166 * cast to an __u64 for 32-64 bit compatibility.
169 * The size of the array should equal the total number of mipmap levels. 167 * The size of the array should equal the total number of mipmap levels.
170 * @shareable: Boolean whether other clients (as identified by file descriptors) 168 * @shareable: Boolean whether other clients (as identified by file descriptors)
171 * may reference this surface. 169 * may reference this surface.
@@ -177,12 +175,12 @@ struct drm_vmw_context_arg {
177 */ 175 */
178 176
179struct drm_vmw_surface_create_req { 177struct drm_vmw_surface_create_req {
180 uint32_t flags; 178 __u32 flags;
181 uint32_t format; 179 __u32 format;
182 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 180 __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
183 uint64_t size_addr; 181 __u64 size_addr;
184 int32_t shareable; 182 __s32 shareable;
185 int32_t scanout; 183 __s32 scanout;
186}; 184};
187 185
188/** 186/**
@@ -197,7 +195,7 @@ struct drm_vmw_surface_create_req {
197 */ 195 */
198 196
199struct drm_vmw_surface_arg { 197struct drm_vmw_surface_arg {
200 int32_t sid; 198 __s32 sid;
201 enum drm_vmw_handle_type handle_type; 199 enum drm_vmw_handle_type handle_type;
202}; 200};
203 201
@@ -213,10 +211,10 @@ struct drm_vmw_surface_arg {
213 */ 211 */
214 212
215struct drm_vmw_size { 213struct drm_vmw_size {
216 uint32_t width; 214 __u32 width;
217 uint32_t height; 215 __u32 height;
218 uint32_t depth; 216 __u32 depth;
219 uint32_t pad64; 217 __u32 pad64;
220}; 218};
221 219
222/** 220/**
@@ -284,13 +282,13 @@ union drm_vmw_surface_reference_arg {
284/** 282/**
285 * struct drm_vmw_execbuf_arg 283 * struct drm_vmw_execbuf_arg
286 * 284 *
287 * @commands: User-space address of a command buffer cast to an uint64_t. 285 * @commands: User-space address of a command buffer cast to an __u64.
288 * @command-size: Size in bytes of the command buffer. 286 * @command-size: Size in bytes of the command buffer.
289 * @throttle-us: Sleep until software is less than @throttle_us 287 * @throttle-us: Sleep until software is less than @throttle_us
290 * microseconds ahead of hardware. The driver may round this value 288 * microseconds ahead of hardware. The driver may round this value
291 * to the nearest kernel tick. 289 * to the nearest kernel tick.
292 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an 290 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
293 * uint64_t. 291 * __u64.
294 * @version: Allows expanding the execbuf ioctl parameters without breaking 292 * @version: Allows expanding the execbuf ioctl parameters without breaking
295 * backwards compatibility, since user-space will always tell the kernel 293 * backwards compatibility, since user-space will always tell the kernel
296 * which version it uses. 294 * which version it uses.
@@ -302,14 +300,14 @@ union drm_vmw_surface_reference_arg {
302#define DRM_VMW_EXECBUF_VERSION 2 300#define DRM_VMW_EXECBUF_VERSION 2
303 301
304struct drm_vmw_execbuf_arg { 302struct drm_vmw_execbuf_arg {
305 uint64_t commands; 303 __u64 commands;
306 uint32_t command_size; 304 __u32 command_size;
307 uint32_t throttle_us; 305 __u32 throttle_us;
308 uint64_t fence_rep; 306 __u64 fence_rep;
309 uint32_t version; 307 __u32 version;
310 uint32_t flags; 308 __u32 flags;
311 uint32_t context_handle; 309 __u32 context_handle;
312 uint32_t pad64; 310 __u32 pad64;
313}; 311};
314 312
315/** 313/**
@@ -338,12 +336,12 @@ struct drm_vmw_execbuf_arg {
338 */ 336 */
339 337
340struct drm_vmw_fence_rep { 338struct drm_vmw_fence_rep {
341 uint32_t handle; 339 __u32 handle;
342 uint32_t mask; 340 __u32 mask;
343 uint32_t seqno; 341 __u32 seqno;
344 uint32_t passed_seqno; 342 __u32 passed_seqno;
345 uint32_t pad64; 343 __u32 pad64;
346 int32_t error; 344 __s32 error;
347}; 345};
348 346
349/*************************************************************************/ 347/*************************************************************************/
@@ -373,8 +371,8 @@ struct drm_vmw_fence_rep {
373 */ 371 */
374 372
375struct drm_vmw_alloc_dmabuf_req { 373struct drm_vmw_alloc_dmabuf_req {
376 uint32_t size; 374 __u32 size;
377 uint32_t pad64; 375 __u32 pad64;
378}; 376};
379 377
380/** 378/**
@@ -391,11 +389,11 @@ struct drm_vmw_alloc_dmabuf_req {
391 */ 389 */
392 390
393struct drm_vmw_dmabuf_rep { 391struct drm_vmw_dmabuf_rep {
394 uint64_t map_handle; 392 __u64 map_handle;
395 uint32_t handle; 393 __u32 handle;
396 uint32_t cur_gmr_id; 394 __u32 cur_gmr_id;
397 uint32_t cur_gmr_offset; 395 __u32 cur_gmr_offset;
398 uint32_t pad64; 396 __u32 pad64;
399}; 397};
400 398
401/** 399/**
@@ -428,8 +426,8 @@ union drm_vmw_alloc_dmabuf_arg {
428 */ 426 */
429 427
430struct drm_vmw_unref_dmabuf_arg { 428struct drm_vmw_unref_dmabuf_arg {
431 uint32_t handle; 429 __u32 handle;
432 uint32_t pad64; 430 __u32 pad64;
433}; 431};
434 432
435/*************************************************************************/ 433/*************************************************************************/
@@ -452,10 +450,10 @@ struct drm_vmw_unref_dmabuf_arg {
452 */ 450 */
453 451
454struct drm_vmw_rect { 452struct drm_vmw_rect {
455 int32_t x; 453 __s32 x;
456 int32_t y; 454 __s32 y;
457 uint32_t w; 455 __u32 w;
458 uint32_t h; 456 __u32 h;
459}; 457};
460 458
461/** 459/**
@@ -477,21 +475,21 @@ struct drm_vmw_rect {
477 */ 475 */
478 476
479struct drm_vmw_control_stream_arg { 477struct drm_vmw_control_stream_arg {
480 uint32_t stream_id; 478 __u32 stream_id;
481 uint32_t enabled; 479 __u32 enabled;
482 480
483 uint32_t flags; 481 __u32 flags;
484 uint32_t color_key; 482 __u32 color_key;
485 483
486 uint32_t handle; 484 __u32 handle;
487 uint32_t offset; 485 __u32 offset;
488 int32_t format; 486 __s32 format;
489 uint32_t size; 487 __u32 size;
490 uint32_t width; 488 __u32 width;
491 uint32_t height; 489 __u32 height;
492 uint32_t pitch[3]; 490 __u32 pitch[3];
493 491
494 uint32_t pad64; 492 __u32 pad64;
495 struct drm_vmw_rect src; 493 struct drm_vmw_rect src;
496 struct drm_vmw_rect dst; 494 struct drm_vmw_rect dst;
497}; 495};
@@ -519,12 +517,12 @@ struct drm_vmw_control_stream_arg {
519 */ 517 */
520 518
521struct drm_vmw_cursor_bypass_arg { 519struct drm_vmw_cursor_bypass_arg {
522 uint32_t flags; 520 __u32 flags;
523 uint32_t crtc_id; 521 __u32 crtc_id;
524 int32_t xpos; 522 __s32 xpos;
525 int32_t ypos; 523 __s32 ypos;
526 int32_t xhot; 524 __s32 xhot;
527 int32_t yhot; 525 __s32 yhot;
528}; 526};
529 527
530/*************************************************************************/ 528/*************************************************************************/
@@ -542,8 +540,8 @@ struct drm_vmw_cursor_bypass_arg {
542 */ 540 */
543 541
544struct drm_vmw_stream_arg { 542struct drm_vmw_stream_arg {
545 uint32_t stream_id; 543 __u32 stream_id;
546 uint32_t pad64; 544 __u32 pad64;
547}; 545};
548 546
549/*************************************************************************/ 547/*************************************************************************/
@@ -565,7 +563,7 @@ struct drm_vmw_stream_arg {
565/** 563/**
566 * struct drm_vmw_get_3d_cap_arg 564 * struct drm_vmw_get_3d_cap_arg
567 * 565 *
568 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t 566 * @buffer: Pointer to a buffer for capability data, cast to an __u64
569 * @size: Max size to copy 567 * @size: Max size to copy
570 * 568 *
571 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL 569 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
@@ -573,9 +571,9 @@ struct drm_vmw_stream_arg {
573 */ 571 */
574 572
575struct drm_vmw_get_3d_cap_arg { 573struct drm_vmw_get_3d_cap_arg {
576 uint64_t buffer; 574 __u64 buffer;
577 uint32_t max_size; 575 __u32 max_size;
578 uint32_t pad64; 576 __u32 pad64;
579}; 577};
580 578
581/*************************************************************************/ 579/*************************************************************************/
@@ -624,14 +622,14 @@ struct drm_vmw_get_3d_cap_arg {
624 */ 622 */
625 623
626struct drm_vmw_fence_wait_arg { 624struct drm_vmw_fence_wait_arg {
627 uint32_t handle; 625 __u32 handle;
628 int32_t cookie_valid; 626 __s32 cookie_valid;
629 uint64_t kernel_cookie; 627 __u64 kernel_cookie;
630 uint64_t timeout_us; 628 __u64 timeout_us;
631 int32_t lazy; 629 __s32 lazy;
632 int32_t flags; 630 __s32 flags;
633 int32_t wait_options; 631 __s32 wait_options;
634 int32_t pad64; 632 __s32 pad64;
635}; 633};
636 634
637/*************************************************************************/ 635/*************************************************************************/
@@ -655,12 +653,12 @@ struct drm_vmw_fence_wait_arg {
655 */ 653 */
656 654
657struct drm_vmw_fence_signaled_arg { 655struct drm_vmw_fence_signaled_arg {
658 uint32_t handle; 656 __u32 handle;
659 uint32_t flags; 657 __u32 flags;
660 int32_t signaled; 658 __s32 signaled;
661 uint32_t passed_seqno; 659 __u32 passed_seqno;
662 uint32_t signaled_flags; 660 __u32 signaled_flags;
663 uint32_t pad64; 661 __u32 pad64;
664}; 662};
665 663
666/*************************************************************************/ 664/*************************************************************************/
@@ -681,8 +679,8 @@ struct drm_vmw_fence_signaled_arg {
681 */ 679 */
682 680
683struct drm_vmw_fence_arg { 681struct drm_vmw_fence_arg {
684 uint32_t handle; 682 __u32 handle;
685 uint32_t pad64; 683 __u32 pad64;
686}; 684};
687 685
688 686
@@ -703,9 +701,9 @@ struct drm_vmw_fence_arg {
703 701
704struct drm_vmw_event_fence { 702struct drm_vmw_event_fence {
705 struct drm_event base; 703 struct drm_event base;
706 uint64_t user_data; 704 __u64 user_data;
707 uint32_t tv_sec; 705 __u32 tv_sec;
708 uint32_t tv_usec; 706 __u32 tv_usec;
709}; 707};
710 708
711/* 709/*
@@ -717,17 +715,17 @@ struct drm_vmw_event_fence {
717/** 715/**
718 * struct drm_vmw_fence_event_arg 716 * struct drm_vmw_fence_event_arg
719 * 717 *
720 * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if 718 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
721 * the fence is not supposed to be referenced by user-space. 719 * the fence is not supposed to be referenced by user-space.
722 * @user_info: Info to be delivered with the event. 720 * @user_info: Info to be delivered with the event.
723 * @handle: Attach the event to this fence only. 721 * @handle: Attach the event to this fence only.
724 * @flags: A set of flags as defined above. 722 * @flags: A set of flags as defined above.
725 */ 723 */
726struct drm_vmw_fence_event_arg { 724struct drm_vmw_fence_event_arg {
727 uint64_t fence_rep; 725 __u64 fence_rep;
728 uint64_t user_data; 726 __u64 user_data;
729 uint32_t handle; 727 __u32 handle;
730 uint32_t flags; 728 __u32 flags;
731}; 729};
732 730
733 731
@@ -747,7 +745,7 @@ struct drm_vmw_fence_event_arg {
747 * @sid: Surface id to present from. 745 * @sid: Surface id to present from.
748 * @dest_x: X placement coordinate for surface. 746 * @dest_x: X placement coordinate for surface.
749 * @dest_y: Y placement coordinate for surface. 747 * @dest_y: Y placement coordinate for surface.
750 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. 748 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
751 * @num_clips: Number of cliprects given relative to the framebuffer origin, 749 * @num_clips: Number of cliprects given relative to the framebuffer origin,
752 * in the same coordinate space as the frame buffer. 750 * in the same coordinate space as the frame buffer.
753 * @pad64: Unused 64-bit padding. 751 * @pad64: Unused 64-bit padding.
@@ -756,13 +754,13 @@ struct drm_vmw_fence_event_arg {
756 */ 754 */
757 755
758struct drm_vmw_present_arg { 756struct drm_vmw_present_arg {
759 uint32_t fb_id; 757 __u32 fb_id;
760 uint32_t sid; 758 __u32 sid;
761 int32_t dest_x; 759 __s32 dest_x;
762 int32_t dest_y; 760 __s32 dest_y;
763 uint64_t clips_ptr; 761 __u64 clips_ptr;
764 uint32_t num_clips; 762 __u32 num_clips;
765 uint32_t pad64; 763 __u32 pad64;
766}; 764};
767 765
768 766
@@ -780,16 +778,16 @@ struct drm_vmw_present_arg {
780 * struct drm_vmw_present_arg 778 * struct drm_vmw_present_arg
781 * @fb_id: fb_id to present / read back from. 779 * @fb_id: fb_id to present / read back from.
782 * @num_clips: Number of cliprects. 780 * @num_clips: Number of cliprects.
783 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. 781 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
784 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. 782 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
785 * If this member is NULL, then the ioctl should not return a fence. 783 * If this member is NULL, then the ioctl should not return a fence.
786 */ 784 */
787 785
788struct drm_vmw_present_readback_arg { 786struct drm_vmw_present_readback_arg {
789 uint32_t fb_id; 787 __u32 fb_id;
790 uint32_t num_clips; 788 __u32 num_clips;
791 uint64_t clips_ptr; 789 __u64 clips_ptr;
792 uint64_t fence_rep; 790 __u64 fence_rep;
793}; 791};
794 792
795/*************************************************************************/ 793/*************************************************************************/
@@ -805,14 +803,14 @@ struct drm_vmw_present_readback_arg {
805 * struct drm_vmw_update_layout_arg 803 * struct drm_vmw_update_layout_arg
806 * 804 *
807 * @num_outputs: number of active connectors 805 * @num_outputs: number of active connectors
808 * @rects: pointer to array of drm_vmw_rect cast to an uint64_t 806 * @rects: pointer to array of drm_vmw_rect cast to an __u64
809 * 807 *
810 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. 808 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
811 */ 809 */
812struct drm_vmw_update_layout_arg { 810struct drm_vmw_update_layout_arg {
813 uint32_t num_outputs; 811 __u32 num_outputs;
814 uint32_t pad64; 812 __u32 pad64;
815 uint64_t rects; 813 __u64 rects;
816}; 814};
817 815
818 816
@@ -849,10 +847,10 @@ enum drm_vmw_shader_type {
849 */ 847 */
850struct drm_vmw_shader_create_arg { 848struct drm_vmw_shader_create_arg {
851 enum drm_vmw_shader_type shader_type; 849 enum drm_vmw_shader_type shader_type;
852 uint32_t size; 850 __u32 size;
853 uint32_t buffer_handle; 851 __u32 buffer_handle;
854 uint32_t shader_handle; 852 __u32 shader_handle;
855 uint64_t offset; 853 __u64 offset;
856}; 854};
857 855
858/*************************************************************************/ 856/*************************************************************************/
@@ -871,8 +869,8 @@ struct drm_vmw_shader_create_arg {
871 * Input argument to the DRM_VMW_UNREF_SHADER ioctl. 869 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
872 */ 870 */
873struct drm_vmw_shader_arg { 871struct drm_vmw_shader_arg {
874 uint32_t handle; 872 __u32 handle;
875 uint32_t pad64; 873 __u32 pad64;
876}; 874};
877 875
878/*************************************************************************/ 876/*************************************************************************/
@@ -918,14 +916,14 @@ enum drm_vmw_surface_flags {
918 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. 916 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
919 */ 917 */
920struct drm_vmw_gb_surface_create_req { 918struct drm_vmw_gb_surface_create_req {
921 uint32_t svga3d_flags; 919 __u32 svga3d_flags;
922 uint32_t format; 920 __u32 format;
923 uint32_t mip_levels; 921 __u32 mip_levels;
924 enum drm_vmw_surface_flags drm_surface_flags; 922 enum drm_vmw_surface_flags drm_surface_flags;
925 uint32_t multisample_count; 923 __u32 multisample_count;
926 uint32_t autogen_filter; 924 __u32 autogen_filter;
927 uint32_t buffer_handle; 925 __u32 buffer_handle;
928 uint32_t array_size; 926 __u32 array_size;
929 struct drm_vmw_size base_size; 927 struct drm_vmw_size base_size;
930}; 928};
931 929
@@ -944,11 +942,11 @@ struct drm_vmw_gb_surface_create_req {
944 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. 942 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
945 */ 943 */
946struct drm_vmw_gb_surface_create_rep { 944struct drm_vmw_gb_surface_create_rep {
947 uint32_t handle; 945 __u32 handle;
948 uint32_t backup_size; 946 __u32 backup_size;
949 uint32_t buffer_handle; 947 __u32 buffer_handle;
950 uint32_t buffer_size; 948 __u32 buffer_size;
951 uint64_t buffer_map_handle; 949 __u64 buffer_map_handle;
952}; 950};
953 951
954/** 952/**
@@ -1061,8 +1059,8 @@ enum drm_vmw_synccpu_op {
1061struct drm_vmw_synccpu_arg { 1059struct drm_vmw_synccpu_arg {
1062 enum drm_vmw_synccpu_op op; 1060 enum drm_vmw_synccpu_op op;
1063 enum drm_vmw_synccpu_flags flags; 1061 enum drm_vmw_synccpu_flags flags;
1064 uint32_t handle; 1062 __u32 handle;
1065 uint32_t pad64; 1063 __u32 pad64;
1066}; 1064};
1067 1065
1068/*************************************************************************/ 1066/*************************************************************************/
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 628e6e64c2fb..c2e5d6cb34e3 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -186,6 +186,7 @@ header-y += if_tunnel.h
186header-y += if_vlan.h 186header-y += if_vlan.h
187header-y += if_x25.h 187header-y += if_x25.h
188header-y += igmp.h 188header-y += igmp.h
189header-y += ila.h
189header-y += in6.h 190header-y += in6.h
190header-y += inet_diag.h 191header-y += inet_diag.h
191header-y += in.h 192header-y += in.h
diff --git a/include/uapi/linux/agpgart.h b/include/uapi/linux/agpgart.h
index 4e828cf487bc..f5251045181a 100644
--- a/include/uapi/linux/agpgart.h
+++ b/include/uapi/linux/agpgart.h
@@ -52,6 +52,7 @@
52 52
53#ifndef __KERNEL__ 53#ifndef __KERNEL__
54#include <linux/types.h> 54#include <linux/types.h>
55#include <stdlib.h>
55 56
56struct agp_version { 57struct agp_version {
57 __u16 major; 58 __u16 major;
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 28ccedd000f5..a27222d5b413 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -628,7 +628,7 @@ struct ovs_action_hash {
628 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the 628 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
629 * mask, the corresponding bit in the value is copied to the connection 629 * mask, the corresponding bit in the value is copied to the connection
630 * tracking mark field in the connection. 630 * tracking mark field in the connection.
631 * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN 631 * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
632 * mask. For each bit set in the mask, the corresponding bit in the value is 632 * mask. For each bit set in the mask, the corresponding bit in the value is
633 * copied to the connection tracking label field in the connection. 633 * copied to the connection tracking label field in the connection.
634 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. 634 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 751b69f858c8..9fd7b5d8df2f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -39,13 +39,6 @@
39#define VFIO_SPAPR_TCE_v2_IOMMU 7 39#define VFIO_SPAPR_TCE_v2_IOMMU 7
40 40
41/* 41/*
42 * The No-IOMMU IOMMU offers no translation or isolation for devices and
43 * supports no ioctls outside of VFIO_CHECK_EXTENSION. Use of VFIO's No-IOMMU
44 * code will taint the host kernel and should be used with extreme caution.
45 */
46#define VFIO_NOIOMMU_IOMMU 8
47
48/*
49 * The IOCTL interface is designed for extensibility by embedding the 42 * The IOCTL interface is designed for extensibility by embedding the
50 * structure length (argsz) and flags into structures passed between 43 * structure length (argsz) and flags into structures passed between
51 * kernel and userspace. We therefore use the _IO() macro for these 44 * kernel and userspace. We therefore use the _IO() macro for these
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 7a63faa9065c..4b04ead26cd9 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -287,7 +287,7 @@ struct virtio_gpu_get_capset {
287/* VIRTIO_GPU_RESP_OK_CAPSET */ 287/* VIRTIO_GPU_RESP_OK_CAPSET */
288struct virtio_gpu_resp_capset { 288struct virtio_gpu_resp_capset {
289 struct virtio_gpu_ctrl_hdr hdr; 289 struct virtio_gpu_ctrl_hdr hdr;
290 uint8_t capset_data[]; 290 __u8 capset_data[];
291}; 291};
292 292
293#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) 293#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 7d28aff605c7..7dc685b4057d 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -181,6 +181,20 @@ struct __name##_back_ring { \
181#define RING_GET_REQUEST(_r, _idx) \ 181#define RING_GET_REQUEST(_r, _idx) \
182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) 182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
183 183
184/*
185 * Get a local copy of a request.
186 *
187 * Use this in preference to RING_GET_REQUEST() so all processing is
188 * done on a local copy that cannot be modified by the other end.
189 *
190 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
191 * to be ineffective where _req is a struct which consists of only bitfields.
192 */
193#define RING_COPY_REQUEST(_r, _idx, _req) do { \
194 /* Use volatile to force the copy into _req. */ \
195 *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
196} while (0)
197
184#define RING_GET_RESPONSE(_r, _idx) \ 198#define RING_GET_RESPONSE(_r, _idx) \
185 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) 199 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
186 200
diff --git a/init/Kconfig b/init/Kconfig
index c24b6f767bf0..235c7a2c0d20 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2030,13 +2030,6 @@ config INIT_ALL_POSSIBLE
2030 it was better to provide this option than to break all the archs 2030 it was better to provide this option than to break all the archs
2031 and have several arch maintainers pursuing me down dark alleys. 2031 and have several arch maintainers pursuing me down dark alleys.
2032 2032
2033config STOP_MACHINE
2034 bool
2035 default y
2036 depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
2037 help
2038 Need stop_machine() primitive.
2039
2040source "block/Kconfig" 2033source "block/Kconfig"
2041 2034
2042config PREEMPT_NOTIFIERS 2035config PREEMPT_NOTIFIERS
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f1603c153890..470f6536b9e8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -98,6 +98,12 @@ static DEFINE_SPINLOCK(css_set_lock);
98static DEFINE_SPINLOCK(cgroup_idr_lock); 98static DEFINE_SPINLOCK(cgroup_idr_lock);
99 99
100/* 100/*
101 * Protects cgroup_file->kn for !self csses. It synchronizes notifications
102 * against file removal/re-creation across css hiding.
103 */
104static DEFINE_SPINLOCK(cgroup_file_kn_lock);
105
106/*
101 * Protects cgroup_subsys->release_agent_path. Modifying it also requires 107 * Protects cgroup_subsys->release_agent_path. Modifying it also requires
102 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. 108 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
103 */ 109 */
@@ -754,9 +760,11 @@ static void put_css_set_locked(struct css_set *cset)
754 if (!atomic_dec_and_test(&cset->refcount)) 760 if (!atomic_dec_and_test(&cset->refcount))
755 return; 761 return;
756 762
757 /* This css_set is dead. unlink it and release cgroup refcounts */ 763 /* This css_set is dead. unlink it and release cgroup and css refs */
758 for_each_subsys(ss, ssid) 764 for_each_subsys(ss, ssid) {
759 list_del(&cset->e_cset_node[ssid]); 765 list_del(&cset->e_cset_node[ssid]);
766 css_put(cset->subsys[ssid]);
767 }
760 hash_del(&cset->hlist); 768 hash_del(&cset->hlist);
761 css_set_count--; 769 css_set_count--;
762 770
@@ -1056,9 +1064,13 @@ static struct css_set *find_css_set(struct css_set *old_cset,
1056 key = css_set_hash(cset->subsys); 1064 key = css_set_hash(cset->subsys);
1057 hash_add(css_set_table, &cset->hlist, key); 1065 hash_add(css_set_table, &cset->hlist, key);
1058 1066
1059 for_each_subsys(ss, ssid) 1067 for_each_subsys(ss, ssid) {
1068 struct cgroup_subsys_state *css = cset->subsys[ssid];
1069
1060 list_add_tail(&cset->e_cset_node[ssid], 1070 list_add_tail(&cset->e_cset_node[ssid],
1061 &cset->subsys[ssid]->cgroup->e_csets[ssid]); 1071 &css->cgroup->e_csets[ssid]);
1072 css_get(css);
1073 }
1062 1074
1063 spin_unlock_bh(&css_set_lock); 1075 spin_unlock_bh(&css_set_lock);
1064 1076
@@ -1393,6 +1405,16 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1393 char name[CGROUP_FILE_NAME_MAX]; 1405 char name[CGROUP_FILE_NAME_MAX];
1394 1406
1395 lockdep_assert_held(&cgroup_mutex); 1407 lockdep_assert_held(&cgroup_mutex);
1408
1409 if (cft->file_offset) {
1410 struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1411 struct cgroup_file *cfile = (void *)css + cft->file_offset;
1412
1413 spin_lock_irq(&cgroup_file_kn_lock);
1414 cfile->kn = NULL;
1415 spin_unlock_irq(&cgroup_file_kn_lock);
1416 }
1417
1396 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); 1418 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1397} 1419}
1398 1420
@@ -1856,7 +1878,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
1856 1878
1857 INIT_LIST_HEAD(&cgrp->self.sibling); 1879 INIT_LIST_HEAD(&cgrp->self.sibling);
1858 INIT_LIST_HEAD(&cgrp->self.children); 1880 INIT_LIST_HEAD(&cgrp->self.children);
1859 INIT_LIST_HEAD(&cgrp->self.files);
1860 INIT_LIST_HEAD(&cgrp->cset_links); 1881 INIT_LIST_HEAD(&cgrp->cset_links);
1861 INIT_LIST_HEAD(&cgrp->pidlists); 1882 INIT_LIST_HEAD(&cgrp->pidlists);
1862 mutex_init(&cgrp->pidlist_mutex); 1883 mutex_init(&cgrp->pidlist_mutex);
@@ -2216,6 +2237,9 @@ struct cgroup_taskset {
2216 struct list_head src_csets; 2237 struct list_head src_csets;
2217 struct list_head dst_csets; 2238 struct list_head dst_csets;
2218 2239
2240 /* the subsys currently being processed */
2241 int ssid;
2242
2219 /* 2243 /*
2220 * Fields for cgroup_taskset_*() iteration. 2244 * Fields for cgroup_taskset_*() iteration.
2221 * 2245 *
@@ -2278,25 +2302,29 @@ static void cgroup_taskset_add(struct task_struct *task,
2278/** 2302/**
2279 * cgroup_taskset_first - reset taskset and return the first task 2303 * cgroup_taskset_first - reset taskset and return the first task
2280 * @tset: taskset of interest 2304 * @tset: taskset of interest
2305 * @dst_cssp: output variable for the destination css
2281 * 2306 *
2282 * @tset iteration is initialized and the first task is returned. 2307 * @tset iteration is initialized and the first task is returned.
2283 */ 2308 */
2284struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) 2309struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2310 struct cgroup_subsys_state **dst_cssp)
2285{ 2311{
2286 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); 2312 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2287 tset->cur_task = NULL; 2313 tset->cur_task = NULL;
2288 2314
2289 return cgroup_taskset_next(tset); 2315 return cgroup_taskset_next(tset, dst_cssp);
2290} 2316}
2291 2317
2292/** 2318/**
2293 * cgroup_taskset_next - iterate to the next task in taskset 2319 * cgroup_taskset_next - iterate to the next task in taskset
2294 * @tset: taskset of interest 2320 * @tset: taskset of interest
2321 * @dst_cssp: output variable for the destination css
2295 * 2322 *
2296 * Return the next task in @tset. Iteration must have been initialized 2323 * Return the next task in @tset. Iteration must have been initialized
2297 * with cgroup_taskset_first(). 2324 * with cgroup_taskset_first().
2298 */ 2325 */
2299struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) 2326struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2327 struct cgroup_subsys_state **dst_cssp)
2300{ 2328{
2301 struct css_set *cset = tset->cur_cset; 2329 struct css_set *cset = tset->cur_cset;
2302 struct task_struct *task = tset->cur_task; 2330 struct task_struct *task = tset->cur_task;
@@ -2311,6 +2339,18 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
2311 if (&task->cg_list != &cset->mg_tasks) { 2339 if (&task->cg_list != &cset->mg_tasks) {
2312 tset->cur_cset = cset; 2340 tset->cur_cset = cset;
2313 tset->cur_task = task; 2341 tset->cur_task = task;
2342
2343 /*
2344 * This function may be called both before and
2345 * after cgroup_taskset_migrate(). The two cases
2346 * can be distinguished by looking at whether @cset
2347 * has its ->mg_dst_cset set.
2348 */
2349 if (cset->mg_dst_cset)
2350 *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2351 else
2352 *dst_cssp = cset->subsys[tset->ssid];
2353
2314 return task; 2354 return task;
2315 } 2355 }
2316 2356
@@ -2346,7 +2386,8 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2346 /* check that we can legitimately attach to the cgroup */ 2386 /* check that we can legitimately attach to the cgroup */
2347 for_each_e_css(css, i, dst_cgrp) { 2387 for_each_e_css(css, i, dst_cgrp) {
2348 if (css->ss->can_attach) { 2388 if (css->ss->can_attach) {
2349 ret = css->ss->can_attach(css, tset); 2389 tset->ssid = i;
2390 ret = css->ss->can_attach(tset);
2350 if (ret) { 2391 if (ret) {
2351 failed_css = css; 2392 failed_css = css;
2352 goto out_cancel_attach; 2393 goto out_cancel_attach;
@@ -2379,9 +2420,12 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2379 */ 2420 */
2380 tset->csets = &tset->dst_csets; 2421 tset->csets = &tset->dst_csets;
2381 2422
2382 for_each_e_css(css, i, dst_cgrp) 2423 for_each_e_css(css, i, dst_cgrp) {
2383 if (css->ss->attach) 2424 if (css->ss->attach) {
2384 css->ss->attach(css, tset); 2425 tset->ssid = i;
2426 css->ss->attach(tset);
2427 }
2428 }
2385 2429
2386 ret = 0; 2430 ret = 0;
2387 goto out_release_tset; 2431 goto out_release_tset;
@@ -2390,8 +2434,10 @@ out_cancel_attach:
2390 for_each_e_css(css, i, dst_cgrp) { 2434 for_each_e_css(css, i, dst_cgrp) {
2391 if (css == failed_css) 2435 if (css == failed_css)
2392 break; 2436 break;
2393 if (css->ss->cancel_attach) 2437 if (css->ss->cancel_attach) {
2394 css->ss->cancel_attach(css, tset); 2438 tset->ssid = i;
2439 css->ss->cancel_attach(tset);
2440 }
2395 } 2441 }
2396out_release_tset: 2442out_release_tset:
2397 spin_lock_bh(&css_set_lock); 2443 spin_lock_bh(&css_set_lock);
@@ -3313,9 +3359,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3313 if (cft->file_offset) { 3359 if (cft->file_offset) {
3314 struct cgroup_file *cfile = (void *)css + cft->file_offset; 3360 struct cgroup_file *cfile = (void *)css + cft->file_offset;
3315 3361
3316 kernfs_get(kn); 3362 spin_lock_irq(&cgroup_file_kn_lock);
3317 cfile->kn = kn; 3363 cfile->kn = kn;
3318 list_add(&cfile->node, &css->files); 3364 spin_unlock_irq(&cgroup_file_kn_lock);
3319 } 3365 }
3320 3366
3321 return 0; 3367 return 0;
@@ -3553,6 +3599,22 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3553} 3599}
3554 3600
3555/** 3601/**
3602 * cgroup_file_notify - generate a file modified event for a cgroup_file
3603 * @cfile: target cgroup_file
3604 *
3605 * @cfile must have been obtained by setting cftype->file_offset.
3606 */
3607void cgroup_file_notify(struct cgroup_file *cfile)
3608{
3609 unsigned long flags;
3610
3611 spin_lock_irqsave(&cgroup_file_kn_lock, flags);
3612 if (cfile->kn)
3613 kernfs_notify(cfile->kn);
3614 spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
3615}
3616
3617/**
3556 * cgroup_task_count - count the number of tasks in a cgroup. 3618 * cgroup_task_count - count the number of tasks in a cgroup.
3557 * @cgrp: the cgroup in question 3619 * @cgrp: the cgroup in question
3558 * 3620 *
@@ -4613,13 +4675,9 @@ static void css_free_work_fn(struct work_struct *work)
4613 container_of(work, struct cgroup_subsys_state, destroy_work); 4675 container_of(work, struct cgroup_subsys_state, destroy_work);
4614 struct cgroup_subsys *ss = css->ss; 4676 struct cgroup_subsys *ss = css->ss;
4615 struct cgroup *cgrp = css->cgroup; 4677 struct cgroup *cgrp = css->cgroup;
4616 struct cgroup_file *cfile;
4617 4678
4618 percpu_ref_exit(&css->refcnt); 4679 percpu_ref_exit(&css->refcnt);
4619 4680
4620 list_for_each_entry(cfile, &css->files, node)
4621 kernfs_put(cfile->kn);
4622
4623 if (ss) { 4681 if (ss) {
4624 /* css free path */ 4682 /* css free path */
4625 int id = css->id; 4683 int id = css->id;
@@ -4724,7 +4782,6 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
4724 css->ss = ss; 4782 css->ss = ss;
4725 INIT_LIST_HEAD(&css->sibling); 4783 INIT_LIST_HEAD(&css->sibling);
4726 INIT_LIST_HEAD(&css->children); 4784 INIT_LIST_HEAD(&css->children);
4727 INIT_LIST_HEAD(&css->files);
4728 css->serial_nr = css_serial_nr_next++; 4785 css->serial_nr = css_serial_nr_next++;
4729 4786
4730 if (cgroup_parent(cgrp)) { 4787 if (cgroup_parent(cgrp)) {
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index f1b30ad5dc6d..2d3df82c54f2 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -155,12 +155,10 @@ static void freezer_css_free(struct cgroup_subsys_state *css)
155 * @freezer->lock. freezer_attach() makes the new tasks conform to the 155 * @freezer->lock. freezer_attach() makes the new tasks conform to the
156 * current state and all following state changes can see the new tasks. 156 * current state and all following state changes can see the new tasks.
157 */ 157 */
158static void freezer_attach(struct cgroup_subsys_state *new_css, 158static void freezer_attach(struct cgroup_taskset *tset)
159 struct cgroup_taskset *tset)
160{ 159{
161 struct freezer *freezer = css_freezer(new_css);
162 struct task_struct *task; 160 struct task_struct *task;
163 bool clear_frozen = false; 161 struct cgroup_subsys_state *new_css;
164 162
165 mutex_lock(&freezer_mutex); 163 mutex_lock(&freezer_mutex);
166 164
@@ -174,22 +172,21 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
174 * current state before executing the following - !frozen tasks may 172 * current state before executing the following - !frozen tasks may
175 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. 173 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
176 */ 174 */
177 cgroup_taskset_for_each(task, tset) { 175 cgroup_taskset_for_each(task, new_css, tset) {
176 struct freezer *freezer = css_freezer(new_css);
177
178 if (!(freezer->state & CGROUP_FREEZING)) { 178 if (!(freezer->state & CGROUP_FREEZING)) {
179 __thaw_task(task); 179 __thaw_task(task);
180 } else { 180 } else {
181 freeze_task(task); 181 freeze_task(task);
182 freezer->state &= ~CGROUP_FROZEN; 182 /* clear FROZEN and propagate upwards */
183 clear_frozen = true; 183 while (freezer && (freezer->state & CGROUP_FROZEN)) {
184 freezer->state &= ~CGROUP_FROZEN;
185 freezer = parent_freezer(freezer);
186 }
184 } 187 }
185 } 188 }
186 189
187 /* propagate FROZEN clearing upwards */
188 while (clear_frozen && (freezer = parent_freezer(freezer))) {
189 freezer->state &= ~CGROUP_FROZEN;
190 clear_frozen = freezer->state & CGROUP_FREEZING;
191 }
192
193 mutex_unlock(&freezer_mutex); 190 mutex_unlock(&freezer_mutex);
194} 191}
195 192
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index cdd8df4e991c..b50d5a167fda 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -106,7 +106,7 @@ static void pids_uncharge(struct pids_cgroup *pids, int num)
106{ 106{
107 struct pids_cgroup *p; 107 struct pids_cgroup *p;
108 108
109 for (p = pids; p; p = parent_pids(p)) 109 for (p = pids; parent_pids(p); p = parent_pids(p))
110 pids_cancel(p, num); 110 pids_cancel(p, num);
111} 111}
112 112
@@ -123,7 +123,7 @@ static void pids_charge(struct pids_cgroup *pids, int num)
123{ 123{
124 struct pids_cgroup *p; 124 struct pids_cgroup *p;
125 125
126 for (p = pids; p; p = parent_pids(p)) 126 for (p = pids; parent_pids(p); p = parent_pids(p))
127 atomic64_add(num, &p->counter); 127 atomic64_add(num, &p->counter);
128} 128}
129 129
@@ -140,7 +140,7 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
140{ 140{
141 struct pids_cgroup *p, *q; 141 struct pids_cgroup *p, *q;
142 142
143 for (p = pids; p; p = parent_pids(p)) { 143 for (p = pids; parent_pids(p); p = parent_pids(p)) {
144 int64_t new = atomic64_add_return(num, &p->counter); 144 int64_t new = atomic64_add_return(num, &p->counter);
145 145
146 /* 146 /*
@@ -162,13 +162,13 @@ revert:
162 return -EAGAIN; 162 return -EAGAIN;
163} 163}
164 164
165static int pids_can_attach(struct cgroup_subsys_state *css, 165static int pids_can_attach(struct cgroup_taskset *tset)
166 struct cgroup_taskset *tset)
167{ 166{
168 struct pids_cgroup *pids = css_pids(css);
169 struct task_struct *task; 167 struct task_struct *task;
168 struct cgroup_subsys_state *dst_css;
170 169
171 cgroup_taskset_for_each(task, tset) { 170 cgroup_taskset_for_each(task, dst_css, tset) {
171 struct pids_cgroup *pids = css_pids(dst_css);
172 struct cgroup_subsys_state *old_css; 172 struct cgroup_subsys_state *old_css;
173 struct pids_cgroup *old_pids; 173 struct pids_cgroup *old_pids;
174 174
@@ -187,13 +187,13 @@ static int pids_can_attach(struct cgroup_subsys_state *css,
187 return 0; 187 return 0;
188} 188}
189 189
190static void pids_cancel_attach(struct cgroup_subsys_state *css, 190static void pids_cancel_attach(struct cgroup_taskset *tset)
191 struct cgroup_taskset *tset)
192{ 191{
193 struct pids_cgroup *pids = css_pids(css);
194 struct task_struct *task; 192 struct task_struct *task;
193 struct cgroup_subsys_state *dst_css;
195 194
196 cgroup_taskset_for_each(task, tset) { 195 cgroup_taskset_for_each(task, dst_css, tset) {
196 struct pids_cgroup *pids = css_pids(dst_css);
197 struct cgroup_subsys_state *old_css; 197 struct cgroup_subsys_state *old_css;
198 struct pids_cgroup *old_pids; 198 struct pids_cgroup *old_pids;
199 199
@@ -205,65 +205,28 @@ static void pids_cancel_attach(struct cgroup_subsys_state *css,
205 } 205 }
206} 206}
207 207
208/*
209 * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
210 * on threadgroup_change_begin() held by the copy_process().
211 */
208static int pids_can_fork(struct task_struct *task, void **priv_p) 212static int pids_can_fork(struct task_struct *task, void **priv_p)
209{ 213{
210 struct cgroup_subsys_state *css; 214 struct cgroup_subsys_state *css;
211 struct pids_cgroup *pids; 215 struct pids_cgroup *pids;
212 int err;
213 216
214 /* 217 css = task_css_check(current, pids_cgrp_id, true);
215 * Use the "current" task_css for the pids subsystem as the tentative
216 * css. It is possible we will charge the wrong hierarchy, in which
217 * case we will forcefully revert/reapply the charge on the right
218 * hierarchy after it is committed to the task proper.
219 */
220 css = task_get_css(current, pids_cgrp_id);
221 pids = css_pids(css); 218 pids = css_pids(css);
222 219 return pids_try_charge(pids, 1);
223 err = pids_try_charge(pids, 1);
224 if (err)
225 goto err_css_put;
226
227 *priv_p = css;
228 return 0;
229
230err_css_put:
231 css_put(css);
232 return err;
233} 220}
234 221
235static void pids_cancel_fork(struct task_struct *task, void *priv) 222static void pids_cancel_fork(struct task_struct *task, void *priv)
236{ 223{
237 struct cgroup_subsys_state *css = priv;
238 struct pids_cgroup *pids = css_pids(css);
239
240 pids_uncharge(pids, 1);
241 css_put(css);
242}
243
244static void pids_fork(struct task_struct *task, void *priv)
245{
246 struct cgroup_subsys_state *css; 224 struct cgroup_subsys_state *css;
247 struct cgroup_subsys_state *old_css = priv;
248 struct pids_cgroup *pids; 225 struct pids_cgroup *pids;
249 struct pids_cgroup *old_pids = css_pids(old_css);
250 226
251 css = task_get_css(task, pids_cgrp_id); 227 css = task_css_check(current, pids_cgrp_id, true);
252 pids = css_pids(css); 228 pids = css_pids(css);
253 229 pids_uncharge(pids, 1);
254 /*
255 * If the association has changed, we have to revert and reapply the
256 * charge/uncharge on the wrong hierarchy to the current one. Since
257 * the association can only change due to an organisation event, its
258 * okay for us to ignore the limit in this case.
259 */
260 if (pids != old_pids) {
261 pids_uncharge(old_pids, 1);
262 pids_charge(pids, 1);
263 }
264
265 css_put(css);
266 css_put(old_css);
267} 230}
268 231
269static void pids_free(struct task_struct *task) 232static void pids_free(struct task_struct *task)
@@ -335,6 +298,7 @@ static struct cftype pids_files[] = {
335 { 298 {
336 .name = "current", 299 .name = "current",
337 .read_s64 = pids_current_read, 300 .read_s64 = pids_current_read,
301 .flags = CFTYPE_NOT_ON_ROOT,
338 }, 302 },
339 { } /* terminate */ 303 { } /* terminate */
340}; 304};
@@ -346,7 +310,6 @@ struct cgroup_subsys pids_cgrp_subsys = {
346 .cancel_attach = pids_cancel_attach, 310 .cancel_attach = pids_cancel_attach,
347 .can_fork = pids_can_fork, 311 .can_fork = pids_can_fork,
348 .cancel_fork = pids_cancel_fork, 312 .cancel_fork = pids_cancel_fork,
349 .fork = pids_fork,
350 .free = pids_free, 313 .free = pids_free,
351 .legacy_cftypes = pids_files, 314 .legacy_cftypes = pids_files,
352 .dfl_cftypes = pids_files, 315 .dfl_cftypes = pids_files,
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 10ae73611d80..02a8ea5c9963 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1429,15 +1429,16 @@ static int fmeter_getrate(struct fmeter *fmp)
1429static struct cpuset *cpuset_attach_old_cs; 1429static struct cpuset *cpuset_attach_old_cs;
1430 1430
1431/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 1431/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1432static int cpuset_can_attach(struct cgroup_subsys_state *css, 1432static int cpuset_can_attach(struct cgroup_taskset *tset)
1433 struct cgroup_taskset *tset)
1434{ 1433{
1435 struct cpuset *cs = css_cs(css); 1434 struct cgroup_subsys_state *css;
1435 struct cpuset *cs;
1436 struct task_struct *task; 1436 struct task_struct *task;
1437 int ret; 1437 int ret;
1438 1438
1439 /* used later by cpuset_attach() */ 1439 /* used later by cpuset_attach() */
1440 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset)); 1440 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
1441 cs = css_cs(css);
1441 1442
1442 mutex_lock(&cpuset_mutex); 1443 mutex_lock(&cpuset_mutex);
1443 1444
@@ -1447,7 +1448,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
1447 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) 1448 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1448 goto out_unlock; 1449 goto out_unlock;
1449 1450
1450 cgroup_taskset_for_each(task, tset) { 1451 cgroup_taskset_for_each(task, css, tset) {
1451 ret = task_can_attach(task, cs->cpus_allowed); 1452 ret = task_can_attach(task, cs->cpus_allowed);
1452 if (ret) 1453 if (ret)
1453 goto out_unlock; 1454 goto out_unlock;
@@ -1467,9 +1468,14 @@ out_unlock:
1467 return ret; 1468 return ret;
1468} 1469}
1469 1470
1470static void cpuset_cancel_attach(struct cgroup_subsys_state *css, 1471static void cpuset_cancel_attach(struct cgroup_taskset *tset)
1471 struct cgroup_taskset *tset)
1472{ 1472{
1473 struct cgroup_subsys_state *css;
1474 struct cpuset *cs;
1475
1476 cgroup_taskset_first(tset, &css);
1477 cs = css_cs(css);
1478
1473 mutex_lock(&cpuset_mutex); 1479 mutex_lock(&cpuset_mutex);
1474 css_cs(css)->attach_in_progress--; 1480 css_cs(css)->attach_in_progress--;
1475 mutex_unlock(&cpuset_mutex); 1481 mutex_unlock(&cpuset_mutex);
@@ -1482,16 +1488,19 @@ static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
1482 */ 1488 */
1483static cpumask_var_t cpus_attach; 1489static cpumask_var_t cpus_attach;
1484 1490
1485static void cpuset_attach(struct cgroup_subsys_state *css, 1491static void cpuset_attach(struct cgroup_taskset *tset)
1486 struct cgroup_taskset *tset)
1487{ 1492{
1488 /* static buf protected by cpuset_mutex */ 1493 /* static buf protected by cpuset_mutex */
1489 static nodemask_t cpuset_attach_nodemask_to; 1494 static nodemask_t cpuset_attach_nodemask_to;
1490 struct task_struct *task; 1495 struct task_struct *task;
1491 struct task_struct *leader; 1496 struct task_struct *leader;
1492 struct cpuset *cs = css_cs(css); 1497 struct cgroup_subsys_state *css;
1498 struct cpuset *cs;
1493 struct cpuset *oldcs = cpuset_attach_old_cs; 1499 struct cpuset *oldcs = cpuset_attach_old_cs;
1494 1500
1501 cgroup_taskset_first(tset, &css);
1502 cs = css_cs(css);
1503
1495 mutex_lock(&cpuset_mutex); 1504 mutex_lock(&cpuset_mutex);
1496 1505
1497 /* prepare for attach */ 1506 /* prepare for attach */
@@ -1502,7 +1511,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
1502 1511
1503 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 1512 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1504 1513
1505 cgroup_taskset_for_each(task, tset) { 1514 cgroup_taskset_for_each(task, css, tset) {
1506 /* 1515 /*
1507 * can_attach beforehand should guarantee that this doesn't 1516 * can_attach beforehand should guarantee that this doesn't
1508 * fail. TODO: have a better way to handle failure here 1517 * fail. TODO: have a better way to handle failure here
@@ -1518,7 +1527,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
1518 * sleep and should be moved outside migration path proper. 1527 * sleep and should be moved outside migration path proper.
1519 */ 1528 */
1520 cpuset_attach_nodemask_to = cs->effective_mems; 1529 cpuset_attach_nodemask_to = cs->effective_mems;
1521 cgroup_taskset_for_each_leader(leader, tset) { 1530 cgroup_taskset_for_each_leader(leader, css, tset) {
1522 struct mm_struct *mm = get_task_mm(leader); 1531 struct mm_struct *mm = get_task_mm(leader);
1523 1532
1524 if (mm) { 1533 if (mm) {
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index d659487254d5..9c418002b8c1 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 36babfd20648..ef2d6ea10736 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
435 if (!is_cgroup_event(event)) 435 if (!is_cgroup_event(event))
436 return; 436 return;
437 437
438 cgrp = perf_cgroup_from_task(current); 438 cgrp = perf_cgroup_from_task(current, event->ctx);
439 /* 439 /*
440 * Do not update time when cgroup is not active 440 * Do not update time when cgroup is not active
441 */ 441 */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
458 if (!task || !ctx->nr_cgroups) 458 if (!task || !ctx->nr_cgroups)
459 return; 459 return;
460 460
461 cgrp = perf_cgroup_from_task(task); 461 cgrp = perf_cgroup_from_task(task, ctx);
462 info = this_cpu_ptr(cgrp->info); 462 info = this_cpu_ptr(cgrp->info);
463 info->timestamp = ctx->timestamp; 463 info->timestamp = ctx->timestamp;
464} 464}
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
489 * we reschedule only in the presence of cgroup 489 * we reschedule only in the presence of cgroup
490 * constrained events. 490 * constrained events.
491 */ 491 */
492 rcu_read_lock();
493 492
494 list_for_each_entry_rcu(pmu, &pmus, entry) { 493 list_for_each_entry_rcu(pmu, &pmus, entry) {
495 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 494 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -522,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
522 * set cgrp before ctxsw in to allow 521 * set cgrp before ctxsw in to allow
523 * event_filter_match() to not have to pass 522 * event_filter_match() to not have to pass
524 * task around 523 * task around
524 * we pass the cpuctx->ctx to perf_cgroup_from_task()
525 * because cgorup events are only per-cpu
525 */ 526 */
526 cpuctx->cgrp = perf_cgroup_from_task(task); 527 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
527 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); 528 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
528 } 529 }
529 perf_pmu_enable(cpuctx->ctx.pmu); 530 perf_pmu_enable(cpuctx->ctx.pmu);
@@ -531,8 +532,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
531 } 532 }
532 } 533 }
533 534
534 rcu_read_unlock();
535
536 local_irq_restore(flags); 535 local_irq_restore(flags);
537} 536}
538 537
@@ -542,17 +541,20 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
542 struct perf_cgroup *cgrp1; 541 struct perf_cgroup *cgrp1;
543 struct perf_cgroup *cgrp2 = NULL; 542 struct perf_cgroup *cgrp2 = NULL;
544 543
544 rcu_read_lock();
545 /* 545 /*
546 * we come here when we know perf_cgroup_events > 0 546 * we come here when we know perf_cgroup_events > 0
547 * we do not need to pass the ctx here because we know
548 * we are holding the rcu lock
547 */ 549 */
548 cgrp1 = perf_cgroup_from_task(task); 550 cgrp1 = perf_cgroup_from_task(task, NULL);
549 551
550 /* 552 /*
551 * next is NULL when called from perf_event_enable_on_exec() 553 * next is NULL when called from perf_event_enable_on_exec()
552 * that will systematically cause a cgroup_switch() 554 * that will systematically cause a cgroup_switch()
553 */ 555 */
554 if (next) 556 if (next)
555 cgrp2 = perf_cgroup_from_task(next); 557 cgrp2 = perf_cgroup_from_task(next, NULL);
556 558
557 /* 559 /*
558 * only schedule out current cgroup events if we know 560 * only schedule out current cgroup events if we know
@@ -561,6 +563,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
561 */ 563 */
562 if (cgrp1 != cgrp2) 564 if (cgrp1 != cgrp2)
563 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 565 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
566
567 rcu_read_unlock();
564} 568}
565 569
566static inline void perf_cgroup_sched_in(struct task_struct *prev, 570static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,13 +573,16 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
569 struct perf_cgroup *cgrp1; 573 struct perf_cgroup *cgrp1;
570 struct perf_cgroup *cgrp2 = NULL; 574 struct perf_cgroup *cgrp2 = NULL;
571 575
576 rcu_read_lock();
572 /* 577 /*
573 * we come here when we know perf_cgroup_events > 0 578 * we come here when we know perf_cgroup_events > 0
579 * we do not need to pass the ctx here because we know
580 * we are holding the rcu lock
574 */ 581 */
575 cgrp1 = perf_cgroup_from_task(task); 582 cgrp1 = perf_cgroup_from_task(task, NULL);
576 583
577 /* prev can never be NULL */ 584 /* prev can never be NULL */
578 cgrp2 = perf_cgroup_from_task(prev); 585 cgrp2 = perf_cgroup_from_task(prev, NULL);
579 586
580 /* 587 /*
581 * only need to schedule in cgroup events if we are changing 588 * only need to schedule in cgroup events if we are changing
@@ -584,6 +591,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
584 */ 591 */
585 if (cgrp1 != cgrp2) 592 if (cgrp1 != cgrp2)
586 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 593 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
594
595 rcu_read_unlock();
587} 596}
588 597
589static inline int perf_cgroup_connect(int fd, struct perf_event *event, 598static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -4216,7 +4225,14 @@ retry:
4216 goto retry; 4225 goto retry;
4217 } 4226 }
4218 4227
4219 __perf_event_period(&pe); 4228 if (event->attr.freq) {
4229 event->attr.sample_freq = value;
4230 } else {
4231 event->attr.sample_period = value;
4232 event->hw.sample_period = value;
4233 }
4234
4235 local64_set(&event->hw.period_left, 0);
4220 raw_spin_unlock_irq(&ctx->lock); 4236 raw_spin_unlock_irq(&ctx->lock);
4221 4237
4222 return 0; 4238 return 0;
@@ -5667,6 +5683,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
5667} 5683}
5668 5684
5669static void 5685static void
5686perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
5687 struct perf_event_context *task_ctx)
5688{
5689 rcu_read_lock();
5690 preempt_disable();
5691 perf_event_aux_ctx(task_ctx, output, data);
5692 preempt_enable();
5693 rcu_read_unlock();
5694}
5695
5696static void
5670perf_event_aux(perf_event_aux_output_cb output, void *data, 5697perf_event_aux(perf_event_aux_output_cb output, void *data,
5671 struct perf_event_context *task_ctx) 5698 struct perf_event_context *task_ctx)
5672{ 5699{
@@ -5675,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
5675 struct pmu *pmu; 5702 struct pmu *pmu;
5676 int ctxn; 5703 int ctxn;
5677 5704
5705 /*
5706 * If we have task_ctx != NULL we only notify
5707 * the task context itself. The task_ctx is set
5708 * only for EXIT events before releasing task
5709 * context.
5710 */
5711 if (task_ctx) {
5712 perf_event_aux_task_ctx(output, data, task_ctx);
5713 return;
5714 }
5715
5678 rcu_read_lock(); 5716 rcu_read_lock();
5679 list_for_each_entry_rcu(pmu, &pmus, entry) { 5717 list_for_each_entry_rcu(pmu, &pmus, entry) {
5680 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5718 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
5681 if (cpuctx->unique_pmu != pmu) 5719 if (cpuctx->unique_pmu != pmu)
5682 goto next; 5720 goto next;
5683 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5721 perf_event_aux_ctx(&cpuctx->ctx, output, data);
5684 if (task_ctx)
5685 goto next;
5686 ctxn = pmu->task_ctx_nr; 5722 ctxn = pmu->task_ctx_nr;
5687 if (ctxn < 0) 5723 if (ctxn < 0)
5688 goto next; 5724 goto next;
@@ -5692,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
5692next: 5728next:
5693 put_cpu_ptr(pmu->pmu_cpu_context); 5729 put_cpu_ptr(pmu->pmu_cpu_context);
5694 } 5730 }
5695
5696 if (task_ctx) {
5697 preempt_disable();
5698 perf_event_aux_ctx(task_ctx, output, data);
5699 preempt_enable();
5700 }
5701 rcu_read_unlock(); 5731 rcu_read_unlock();
5702} 5732}
5703 5733
@@ -8787,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
8787 struct perf_event_context *child_ctx, *clone_ctx = NULL; 8817 struct perf_event_context *child_ctx, *clone_ctx = NULL;
8788 unsigned long flags; 8818 unsigned long flags;
8789 8819
8790 if (likely(!child->perf_event_ctxp[ctxn])) { 8820 if (likely(!child->perf_event_ctxp[ctxn]))
8791 perf_event_task(child, NULL, 0);
8792 return; 8821 return;
8793 }
8794 8822
8795 local_irq_save(flags); 8823 local_irq_save(flags);
8796 /* 8824 /*
@@ -8874,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)
8874 8902
8875 for_each_task_context_nr(ctxn) 8903 for_each_task_context_nr(ctxn)
8876 perf_event_exit_task_context(child, ctxn); 8904 perf_event_exit_task_context(child, ctxn);
8905
8906 /*
8907 * The perf_event_exit_task_context calls perf_event_task
8908 * with child's task_ctx, which generates EXIT events for
8909 * child contexts and sets child->perf_event_ctxp[] to NULL.
8910 * At this point we need to send EXIT events to cpu contexts.
8911 */
8912 perf_event_task(child, NULL, 0);
8877} 8913}
8878 8914
8879static void perf_free_event(struct perf_event *event, 8915static void perf_free_event(struct perf_event *event,
@@ -9452,16 +9488,18 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
9452static int __perf_cgroup_move(void *info) 9488static int __perf_cgroup_move(void *info)
9453{ 9489{
9454 struct task_struct *task = info; 9490 struct task_struct *task = info;
9491 rcu_read_lock();
9455 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); 9492 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
9493 rcu_read_unlock();
9456 return 0; 9494 return 0;
9457} 9495}
9458 9496
9459static void perf_cgroup_attach(struct cgroup_subsys_state *css, 9497static void perf_cgroup_attach(struct cgroup_taskset *tset)
9460 struct cgroup_taskset *tset)
9461{ 9498{
9462 struct task_struct *task; 9499 struct task_struct *task;
9500 struct cgroup_subsys_state *css;
9463 9501
9464 cgroup_taskset_for_each(task, tset) 9502 cgroup_taskset_for_each(task, css, tset)
9465 task_function_call(task, __perf_cgroup_move, task); 9503 task_function_call(task, __perf_cgroup_move, task);
9466} 9504}
9467 9505
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b5d1ea79c595..adfdc0536117 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4e5e9798aa0c..7dad84913abf 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -19,7 +19,7 @@
19 * Authors: 19 * Authors:
20 * Srikar Dronamraju 20 * Srikar Dronamraju
21 * Jim Keniston 21 * Jim Keniston
22 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 22 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/kernel/fork.c b/kernel/fork.c
index f97f2c449f5c..fce002ee3ddf 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1368,8 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1368 p->real_start_time = ktime_get_boot_ns(); 1368 p->real_start_time = ktime_get_boot_ns();
1369 p->io_context = NULL; 1369 p->io_context = NULL;
1370 p->audit_context = NULL; 1370 p->audit_context = NULL;
1371 if (clone_flags & CLONE_THREAD) 1371 threadgroup_change_begin(current);
1372 threadgroup_change_begin(current);
1373 cgroup_fork(p); 1372 cgroup_fork(p);
1374#ifdef CONFIG_NUMA 1373#ifdef CONFIG_NUMA
1375 p->mempolicy = mpol_dup(p->mempolicy); 1374 p->mempolicy = mpol_dup(p->mempolicy);
@@ -1610,8 +1609,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1610 1609
1611 proc_fork_connector(p); 1610 proc_fork_connector(p);
1612 cgroup_post_fork(p, cgrp_ss_priv); 1611 cgroup_post_fork(p, cgrp_ss_priv);
1613 if (clone_flags & CLONE_THREAD) 1612 threadgroup_change_end(current);
1614 threadgroup_change_end(current);
1615 perf_event_fork(p); 1613 perf_event_fork(p);
1616 1614
1617 trace_task_newtask(p, clone_flags); 1615 trace_task_newtask(p, clone_flags);
@@ -1652,8 +1650,7 @@ bad_fork_cleanup_policy:
1652 mpol_put(p->mempolicy); 1650 mpol_put(p->mempolicy);
1653bad_fork_cleanup_threadgroup_lock: 1651bad_fork_cleanup_threadgroup_lock:
1654#endif 1652#endif
1655 if (clone_flags & CLONE_THREAD) 1653 threadgroup_change_end(current);
1656 threadgroup_change_end(current);
1657 delayacct_tsk_free(p); 1654 delayacct_tsk_free(p);
1658bad_fork_cleanup_count: 1655bad_fork_cleanup_count:
1659 atomic_dec(&p->cred->user->processes); 1656 atomic_dec(&p->cred->user->processes);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index cbf9fb899d92..bcf107ce0854 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 2 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
3 * 3 *
4 * Provides a framework for enqueueing and running callbacks from hardirq 4 * Provides a framework for enqueueing and running callbacks from hardirq
5 * context. The enqueueing is NMI-safe. 5 * context. The enqueueing is NMI-safe.
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index f7dd15d537f9..05254eeb4b4e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -2,7 +2,7 @@
2 * jump label support 2 * jump label support
3 * 3 *
4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> 4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
5 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2011 Peter Zijlstra
6 * 6 *
7 */ 7 */
8#include <linux/memory.h> 8#include <linux/memory.h>
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index deae3907ac1e..60ace56618f6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -6,7 +6,7 @@
6 * Started by Ingo Molnar: 6 * Started by Ingo Molnar:
7 * 7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10 * 10 *
11 * this code maps all the lock dependencies as they occur in a live kernel 11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs: 12 * and will warn about the following classes of locking bugs:
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index d83d798bef95..dbb61a302548 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -6,7 +6,7 @@
6 * Started by Ingo Molnar: 6 * Started by Ingo Molnar:
7 * 7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10 * 10 *
11 * Code for /proc/lockdep and /proc/lockdep_stats: 11 * Code for /proc/lockdep and /proc/lockdep_stats:
12 * 12 *
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index d092a0c9c2d4..05a37857ab55 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -93,10 +93,12 @@ bool osq_lock(struct optimistic_spin_queue *lock)
93 node->cpu = curr; 93 node->cpu = curr;
94 94
95 /* 95 /*
96 * ACQUIRE semantics, pairs with corresponding RELEASE 96 * We need both ACQUIRE (pairs with corresponding RELEASE in
97 * in unlock() uncontended, or fastpath. 97 * unlock() uncontended, or fastpath) and RELEASE (to publish
98 * the node fields we just initialised) semantics when updating
99 * the lock tail.
98 */ 100 */
99 old = atomic_xchg_acquire(&lock->tail, curr); 101 old = atomic_xchg(&lock->tail, curr);
100 if (old == OSQ_UNLOCKED_VAL) 102 if (old == OSQ_UNLOCKED_VAL)
101 return true; 103 return true;
102 104
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c0a205101c23..caf4041f5b0a 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * sched_clock for unstable cpu clocks 2 * sched_clock for unstable cpu clocks
3 * 3 *
4 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
5 * 5 *
6 * Updates and enhancements: 6 * Updates and enhancements:
7 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com> 7 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7063c6a07440..732e993b564b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8241,12 +8241,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private)
8241 sched_move_task(task); 8241 sched_move_task(task);
8242} 8242}
8243 8243
8244static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, 8244static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8245 struct cgroup_taskset *tset)
8246{ 8245{
8247 struct task_struct *task; 8246 struct task_struct *task;
8247 struct cgroup_subsys_state *css;
8248 8248
8249 cgroup_taskset_for_each(task, tset) { 8249 cgroup_taskset_for_each(task, css, tset) {
8250#ifdef CONFIG_RT_GROUP_SCHED 8250#ifdef CONFIG_RT_GROUP_SCHED
8251 if (!sched_rt_can_attach(css_tg(css), task)) 8251 if (!sched_rt_can_attach(css_tg(css), task))
8252 return -EINVAL; 8252 return -EINVAL;
@@ -8259,12 +8259,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
8259 return 0; 8259 return 0;
8260} 8260}
8261 8261
8262static void cpu_cgroup_attach(struct cgroup_subsys_state *css, 8262static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8263 struct cgroup_taskset *tset)
8264{ 8263{
8265 struct task_struct *task; 8264 struct task_struct *task;
8265 struct cgroup_subsys_state *css;
8266 8266
8267 cgroup_taskset_for_each(task, tset) 8267 cgroup_taskset_for_each(task, css, tset)
8268 sched_move_task(task); 8268 sched_move_task(task);
8269} 8269}
8270 8270
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f04fda8f669c..90e26b11deaa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -17,7 +17,7 @@
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> 17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 * 18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra 19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
21 */ 21 */
22 22
23#include <linux/latencytop.h> 23#include <linux/latencytop.h>
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index f10bd873e684..f15d6b6a538a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -392,7 +392,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
392 do { 392 do {
393 prepare_to_wait(wq, &q->wait, mode); 393 prepare_to_wait(wq, &q->wait, mode);
394 if (test_bit(q->key.bit_nr, q->key.flags)) 394 if (test_bit(q->key.bit_nr, q->key.flags))
395 ret = (*action)(&q->key); 395 ret = (*action)(&q->key, mode);
396 } while (test_bit(q->key.bit_nr, q->key.flags) && !ret); 396 } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
397 finish_wait(wq, &q->wait); 397 finish_wait(wq, &q->wait);
398 return ret; 398 return ret;
@@ -431,7 +431,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
431 prepare_to_wait_exclusive(wq, &q->wait, mode); 431 prepare_to_wait_exclusive(wq, &q->wait, mode);
432 if (!test_bit(q->key.bit_nr, q->key.flags)) 432 if (!test_bit(q->key.bit_nr, q->key.flags))
433 continue; 433 continue;
434 ret = action(&q->key); 434 ret = action(&q->key, mode);
435 if (!ret) 435 if (!ret)
436 continue; 436 continue;
437 abort_exclusive_wait(wq, &q->wait, mode, &q->key); 437 abort_exclusive_wait(wq, &q->wait, mode, &q->key);
@@ -581,43 +581,43 @@ void wake_up_atomic_t(atomic_t *p)
581} 581}
582EXPORT_SYMBOL(wake_up_atomic_t); 582EXPORT_SYMBOL(wake_up_atomic_t);
583 583
584__sched int bit_wait(struct wait_bit_key *word) 584__sched int bit_wait(struct wait_bit_key *word, int mode)
585{ 585{
586 schedule(); 586 schedule();
587 if (signal_pending(current)) 587 if (signal_pending_state(mode, current))
588 return -EINTR; 588 return -EINTR;
589 return 0; 589 return 0;
590} 590}
591EXPORT_SYMBOL(bit_wait); 591EXPORT_SYMBOL(bit_wait);
592 592
593__sched int bit_wait_io(struct wait_bit_key *word) 593__sched int bit_wait_io(struct wait_bit_key *word, int mode)
594{ 594{
595 io_schedule(); 595 io_schedule();
596 if (signal_pending(current)) 596 if (signal_pending_state(mode, current))
597 return -EINTR; 597 return -EINTR;
598 return 0; 598 return 0;
599} 599}
600EXPORT_SYMBOL(bit_wait_io); 600EXPORT_SYMBOL(bit_wait_io);
601 601
602__sched int bit_wait_timeout(struct wait_bit_key *word) 602__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
603{ 603{
604 unsigned long now = READ_ONCE(jiffies); 604 unsigned long now = READ_ONCE(jiffies);
605 if (time_after_eq(now, word->timeout)) 605 if (time_after_eq(now, word->timeout))
606 return -EAGAIN; 606 return -EAGAIN;
607 schedule_timeout(word->timeout - now); 607 schedule_timeout(word->timeout - now);
608 if (signal_pending(current)) 608 if (signal_pending_state(mode, current))
609 return -EINTR; 609 return -EINTR;
610 return 0; 610 return 0;
611} 611}
612EXPORT_SYMBOL_GPL(bit_wait_timeout); 612EXPORT_SYMBOL_GPL(bit_wait_timeout);
613 613
614__sched int bit_wait_io_timeout(struct wait_bit_key *word) 614__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
615{ 615{
616 unsigned long now = READ_ONCE(jiffies); 616 unsigned long now = READ_ONCE(jiffies);
617 if (time_after_eq(now, word->timeout)) 617 if (time_after_eq(now, word->timeout))
618 return -EAGAIN; 618 return -EAGAIN;
619 io_schedule_timeout(word->timeout - now); 619 io_schedule_timeout(word->timeout - now);
620 if (signal_pending(current)) 620 if (signal_pending_state(mode, current))
621 return -EINTR; 621 return -EINTR;
622 return 0; 622 return 0;
623} 623}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 867bc20e1ef1..a3bbaee77c58 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -531,7 +531,7 @@ static int __init cpu_stop_init(void)
531} 531}
532early_initcall(cpu_stop_init); 532early_initcall(cpu_stop_init);
533 533
534#ifdef CONFIG_STOP_MACHINE 534#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
535 535
536static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) 536static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
537{ 537{
@@ -631,4 +631,4 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
631 return ret ?: done.ret; 631 return ret ?: done.ret;
632} 632}
633 633
634#endif /* CONFIG_STOP_MACHINE */ 634#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index abfc903e741e..cc9f7a9319be 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * trace event based perf event profiling/tracing 2 * trace event based perf event profiling/tracing
3 * 3 *
4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
5 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com> 5 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
6 */ 6 */
7 7
diff --git a/lib/btree.c b/lib/btree.c
index 4264871ea1a0..f93a945274af 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> 6 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
7 * Bits and pieces stolen from Peter Zijlstra's code, which is 7 * Bits and pieces stolen from Peter Zijlstra's code, which is
8 * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright 2007, Red Hat Inc. Peter Zijlstra
9 * GPLv2 9 * GPLv2
10 * 10 *
11 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch 11 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8855f019ebe8..d34bd24c2c84 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
1464 entry->type = dma_debug_coherent; 1464 entry->type = dma_debug_coherent;
1465 entry->dev = dev; 1465 entry->dev = dev;
1466 entry->pfn = page_to_pfn(virt_to_page(virt)); 1466 entry->pfn = page_to_pfn(virt_to_page(virt));
1467 entry->offset = (size_t) virt & PAGE_MASK; 1467 entry->offset = (size_t) virt & ~PAGE_MASK;
1468 entry->size = size; 1468 entry->size = size;
1469 entry->dev_addr = dma_addr; 1469 entry->dev_addr = dma_addr;
1470 entry->direction = DMA_BIDIRECTIONAL; 1470 entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
1480 .type = dma_debug_coherent, 1480 .type = dma_debug_coherent,
1481 .dev = dev, 1481 .dev = dev,
1482 .pfn = page_to_pfn(virt_to_page(virt)), 1482 .pfn = page_to_pfn(virt_to_page(virt)),
1483 .offset = (size_t) virt & PAGE_MASK, 1483 .offset = (size_t) virt & ~PAGE_MASK,
1484 .dev_addr = addr, 1484 .dev_addr = addr,
1485 .size = size, 1485 .size = size,
1486 .direction = DMA_BIDIRECTIONAL, 1486 .direction = DMA_BIDIRECTIONAL,
diff --git a/lib/proportions.c b/lib/proportions.c
index 6f724298f67a..efa54f259ea9 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Floating proportions 2 * Floating proportions
3 * 3 *
4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
5 * 5 *
6 * Description: 6 * Description:
7 * 7 *
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a54ff8949f91..eb9240c458fa 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -389,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,
389 return false; 389 return false;
390} 390}
391 391
392int rhashtable_insert_rehash(struct rhashtable *ht) 392int rhashtable_insert_rehash(struct rhashtable *ht,
393 struct bucket_table *tbl)
393{ 394{
394 struct bucket_table *old_tbl; 395 struct bucket_table *old_tbl;
395 struct bucket_table *new_tbl; 396 struct bucket_table *new_tbl;
396 struct bucket_table *tbl;
397 unsigned int size; 397 unsigned int size;
398 int err; 398 int err;
399 399
400 old_tbl = rht_dereference_rcu(ht->tbl, ht); 400 old_tbl = rht_dereference_rcu(ht->tbl, ht);
401 tbl = rhashtable_last_table(ht, old_tbl);
402 401
403 size = tbl->size; 402 size = tbl->size;
404 403
404 err = -EBUSY;
405
405 if (rht_grow_above_75(ht, tbl)) 406 if (rht_grow_above_75(ht, tbl))
406 size *= 2; 407 size *= 2;
407 /* Do not schedule more than one rehash */ 408 /* Do not schedule more than one rehash */
408 else if (old_tbl != tbl) 409 else if (old_tbl != tbl)
409 return -EBUSY; 410 goto fail;
411
412 err = -ENOMEM;
410 413
411 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); 414 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
412 if (new_tbl == NULL) { 415 if (new_tbl == NULL)
413 /* Schedule async resize/rehash to try allocation 416 goto fail;
414 * non-atomic context.
415 */
416 schedule_work(&ht->run_work);
417 return -ENOMEM;
418 }
419 417
420 err = rhashtable_rehash_attach(ht, tbl, new_tbl); 418 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
421 if (err) { 419 if (err) {
@@ -426,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
426 schedule_work(&ht->run_work); 424 schedule_work(&ht->run_work);
427 425
428 return err; 426 return err;
427
428fail:
429 /* Do not fail the insert if someone else did a rehash. */
430 if (likely(rcu_dereference_raw(tbl->future_tbl)))
431 return 0;
432
433 /* Schedule async rehash to retry allocation in process context. */
434 if (err == -ENOMEM)
435 schedule_work(&ht->run_work);
436
437 return err;
429} 438}
430EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); 439EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
431 440
432int rhashtable_insert_slow(struct rhashtable *ht, const void *key, 441struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
433 struct rhash_head *obj, 442 const void *key,
434 struct bucket_table *tbl) 443 struct rhash_head *obj,
444 struct bucket_table *tbl)
435{ 445{
436 struct rhash_head *head; 446 struct rhash_head *head;
437 unsigned int hash; 447 unsigned int hash;
@@ -467,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
467exit: 477exit:
468 spin_unlock(rht_bucket_lock(tbl, hash)); 478 spin_unlock(rht_bucket_lock(tbl, hash));
469 479
470 return err; 480 if (err == 0)
481 return NULL;
482 else if (err == -EAGAIN)
483 return tbl;
484 else
485 return ERR_PTR(err);
471} 486}
472EXPORT_SYMBOL_GPL(rhashtable_insert_slow); 487EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
473 488
@@ -503,10 +518,10 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
503 if (!iter->walker) 518 if (!iter->walker)
504 return -ENOMEM; 519 return -ENOMEM;
505 520
506 mutex_lock(&ht->mutex); 521 spin_lock(&ht->lock);
507 iter->walker->tbl = rht_dereference(ht->tbl, ht); 522 iter->walker->tbl = rht_dereference(ht->tbl, ht);
508 list_add(&iter->walker->list, &iter->walker->tbl->walkers); 523 list_add(&iter->walker->list, &iter->walker->tbl->walkers);
509 mutex_unlock(&ht->mutex); 524 spin_unlock(&ht->lock);
510 525
511 return 0; 526 return 0;
512} 527}
@@ -520,10 +535,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
520 */ 535 */
521void rhashtable_walk_exit(struct rhashtable_iter *iter) 536void rhashtable_walk_exit(struct rhashtable_iter *iter)
522{ 537{
523 mutex_lock(&iter->ht->mutex); 538 spin_lock(&iter->ht->lock);
524 if (iter->walker->tbl) 539 if (iter->walker->tbl)
525 list_del(&iter->walker->list); 540 list_del(&iter->walker->list);
526 mutex_unlock(&iter->ht->mutex); 541 spin_unlock(&iter->ht->lock);
527 kfree(iter->walker); 542 kfree(iter->walker);
528} 543}
529EXPORT_SYMBOL_GPL(rhashtable_walk_exit); 544EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -547,14 +562,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
547{ 562{
548 struct rhashtable *ht = iter->ht; 563 struct rhashtable *ht = iter->ht;
549 564
550 mutex_lock(&ht->mutex); 565 rcu_read_lock();
551 566
567 spin_lock(&ht->lock);
552 if (iter->walker->tbl) 568 if (iter->walker->tbl)
553 list_del(&iter->walker->list); 569 list_del(&iter->walker->list);
554 570 spin_unlock(&ht->lock);
555 rcu_read_lock();
556
557 mutex_unlock(&ht->mutex);
558 571
559 if (!iter->walker->tbl) { 572 if (!iter->walker->tbl) {
560 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); 573 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -723,9 +736,6 @@ int rhashtable_init(struct rhashtable *ht,
723 if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) 736 if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
724 return -EINVAL; 737 return -EINVAL;
725 738
726 if (params->nelem_hint)
727 size = rounded_hashtable_size(params);
728
729 memset(ht, 0, sizeof(*ht)); 739 memset(ht, 0, sizeof(*ht));
730 mutex_init(&ht->mutex); 740 mutex_init(&ht->mutex);
731 spin_lock_init(&ht->lock); 741 spin_lock_init(&ht->lock);
@@ -745,6 +755,9 @@ int rhashtable_init(struct rhashtable *ht,
745 755
746 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); 756 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
747 757
758 if (params->nelem_hint)
759 size = rounded_hashtable_size(&ht->p);
760
748 /* The maximum (not average) chain length grows with the 761 /* The maximum (not average) chain length grows with the
749 * size of the hash table, at a rate of (log N)/(log log N). 762 * size of the hash table, at a rate of (log N)/(log log N).
750 * The value of 16 is selected so that even if the hash 763 * The value of 16 is selected so that even if the hash
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8ed2ffd963c5..7340353f8aea 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -957,8 +957,9 @@ EXPORT_SYMBOL(congestion_wait);
957 * jiffies for either a BDI to exit congestion of the given @sync queue 957 * jiffies for either a BDI to exit congestion of the given @sync queue
958 * or a write to complete. 958 * or a write to complete.
959 * 959 *
960 * In the absence of zone congestion, cond_resched() is called to yield 960 * In the absence of zone congestion, a short sleep or a cond_resched is
961 * the processor if necessary but otherwise does not sleep. 961 * performed to yield the processor and to allow other subsystems to make
962 * a forward progress.
962 * 963 *
963 * The return value is 0 if the sleep is for the full timeout. Otherwise, 964 * The return value is 0 if the sleep is for the full timeout. Otherwise,
964 * it is the number of jiffies that were still remaining when the function 965 * it is the number of jiffies that were still remaining when the function
@@ -978,7 +979,19 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
978 */ 979 */
979 if (atomic_read(&nr_wb_congested[sync]) == 0 || 980 if (atomic_read(&nr_wb_congested[sync]) == 0 ||
980 !test_bit(ZONE_CONGESTED, &zone->flags)) { 981 !test_bit(ZONE_CONGESTED, &zone->flags)) {
981 cond_resched(); 982
983 /*
984 * Memory allocation/reclaim might be called from a WQ
985 * context and the current implementation of the WQ
986 * concurrency control doesn't recognize that a particular
987 * WQ is congested if the worker thread is looping without
988 * ever sleeping. Therefore we have to do a short sleep
989 * here rather than calling cond_resched().
990 */
991 if (current->flags & PF_WQ_WORKER)
992 schedule_timeout(1);
993 else
994 cond_resched();
982 995
983 /* In case we scheduled, work out time remaining */ 996 /* In case we scheduled, work out time remaining */
984 ret = timeout - (jiffies - start); 997 ret = timeout - (jiffies - start);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 827bb02a43a4..ef6963b577fd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -372,8 +372,10 @@ retry_locked:
372 spin_unlock(&resv->lock); 372 spin_unlock(&resv->lock);
373 373
374 trg = kmalloc(sizeof(*trg), GFP_KERNEL); 374 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
375 if (!trg) 375 if (!trg) {
376 kfree(nrg);
376 return -ENOMEM; 377 return -ENOMEM;
378 }
377 379
378 spin_lock(&resv->lock); 380 spin_lock(&resv->lock);
379 list_add(&trg->link, &resv->region_cache); 381 list_add(&trg->link, &resv->region_cache);
@@ -483,8 +485,16 @@ static long region_del(struct resv_map *resv, long f, long t)
483retry: 485retry:
484 spin_lock(&resv->lock); 486 spin_lock(&resv->lock);
485 list_for_each_entry_safe(rg, trg, head, link) { 487 list_for_each_entry_safe(rg, trg, head, link) {
486 if (rg->to <= f) 488 /*
489 * Skip regions before the range to be deleted. file_region
490 * ranges are normally of the form [from, to). However, there
491 * may be a "placeholder" entry in the map which is of the form
492 * (from, to) with from == to. Check for placeholder entries
493 * at the beginning of the range to be deleted.
494 */
495 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
487 continue; 496 continue;
497
488 if (rg->from >= t) 498 if (rg->from >= t)
489 break; 499 break;
490 500
@@ -1886,7 +1896,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
1886 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); 1896 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1887 if (!page) 1897 if (!page)
1888 goto out_uncharge_cgroup; 1898 goto out_uncharge_cgroup;
1889 1899 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1900 SetPagePrivate(page);
1901 h->resv_huge_pages--;
1902 }
1890 spin_lock(&hugetlb_lock); 1903 spin_lock(&hugetlb_lock);
1891 list_move(&page->lru, &h->hugepage_activelist); 1904 list_move(&page->lru, &h->hugepage_activelist);
1892 /* Fall through */ 1905 /* Fall through */
@@ -3693,12 +3706,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3693 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 3706 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3694 return VM_FAULT_HWPOISON_LARGE | 3707 return VM_FAULT_HWPOISON_LARGE |
3695 VM_FAULT_SET_HINDEX(hstate_index(h)); 3708 VM_FAULT_SET_HINDEX(hstate_index(h));
3709 } else {
3710 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3711 if (!ptep)
3712 return VM_FAULT_OOM;
3696 } 3713 }
3697 3714
3698 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3699 if (!ptep)
3700 return VM_FAULT_OOM;
3701
3702 mapping = vma->vm_file->f_mapping; 3715 mapping = vma->vm_file->f_mapping;
3703 idx = vma_hugecache_offset(h, vma, address); 3716 idx = vma_hugecache_offset(h, vma, address);
3704 3717
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9acfb165eb52..e234c21a5e6c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2128,7 +2128,7 @@ done_restock:
2128 */ 2128 */
2129 do { 2129 do {
2130 if (page_counter_read(&memcg->memory) > memcg->high) { 2130 if (page_counter_read(&memcg->memory) > memcg->high) {
2131 current->memcg_nr_pages_over_high += nr_pages; 2131 current->memcg_nr_pages_over_high += batch;
2132 set_notify_resume(current); 2132 set_notify_resume(current);
2133 break; 2133 break;
2134 } 2134 }
@@ -4779,23 +4779,18 @@ static void mem_cgroup_clear_mc(void)
4779 spin_unlock(&mc.lock); 4779 spin_unlock(&mc.lock);
4780} 4780}
4781 4781
4782static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 4782static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4783 struct cgroup_taskset *tset)
4784{ 4783{
4785 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4784 struct cgroup_subsys_state *css;
4785 struct mem_cgroup *memcg;
4786 struct mem_cgroup *from; 4786 struct mem_cgroup *from;
4787 struct task_struct *leader, *p; 4787 struct task_struct *leader, *p;
4788 struct mm_struct *mm; 4788 struct mm_struct *mm;
4789 unsigned long move_flags; 4789 unsigned long move_flags;
4790 int ret = 0; 4790 int ret = 0;
4791 4791
4792 /* 4792 /* charge immigration isn't supported on the default hierarchy */
4793 * We are now commited to this value whatever it is. Changes in this 4793 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4794 * tunable will only affect upcoming migrations, not the current one.
4795 * So we need to save it, and keep it going.
4796 */
4797 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4798 if (!move_flags)
4799 return 0; 4794 return 0;
4800 4795
4801 /* 4796 /*
@@ -4805,13 +4800,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
4805 * multiple. 4800 * multiple.
4806 */ 4801 */
4807 p = NULL; 4802 p = NULL;
4808 cgroup_taskset_for_each_leader(leader, tset) { 4803 cgroup_taskset_for_each_leader(leader, css, tset) {
4809 WARN_ON_ONCE(p); 4804 WARN_ON_ONCE(p);
4810 p = leader; 4805 p = leader;
4806 memcg = mem_cgroup_from_css(css);
4811 } 4807 }
4812 if (!p) 4808 if (!p)
4813 return 0; 4809 return 0;
4814 4810
4811 /*
4812 * We are now commited to this value whatever it is. Changes in this
4813 * tunable will only affect upcoming migrations, not the current one.
4814 * So we need to save it, and keep it going.
4815 */
4816 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4817 if (!move_flags)
4818 return 0;
4819
4815 from = mem_cgroup_from_task(p); 4820 from = mem_cgroup_from_task(p);
4816 4821
4817 VM_BUG_ON(from == memcg); 4822 VM_BUG_ON(from == memcg);
@@ -4842,8 +4847,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
4842 return ret; 4847 return ret;
4843} 4848}
4844 4849
4845static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 4850static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4846 struct cgroup_taskset *tset)
4847{ 4851{
4848 if (mc.to) 4852 if (mc.to)
4849 mem_cgroup_clear_mc(); 4853 mem_cgroup_clear_mc();
@@ -4985,10 +4989,10 @@ retry:
4985 atomic_dec(&mc.from->moving_account); 4989 atomic_dec(&mc.from->moving_account);
4986} 4990}
4987 4991
4988static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 4992static void mem_cgroup_move_task(struct cgroup_taskset *tset)
4989 struct cgroup_taskset *tset)
4990{ 4993{
4991 struct task_struct *p = cgroup_taskset_first(tset); 4994 struct cgroup_subsys_state *css;
4995 struct task_struct *p = cgroup_taskset_first(tset, &css);
4992 struct mm_struct *mm = get_task_mm(p); 4996 struct mm_struct *mm = get_task_mm(p);
4993 4997
4994 if (mm) { 4998 if (mm) {
@@ -5000,17 +5004,14 @@ static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
5000 mem_cgroup_clear_mc(); 5004 mem_cgroup_clear_mc();
5001} 5005}
5002#else /* !CONFIG_MMU */ 5006#else /* !CONFIG_MMU */
5003static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 5007static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5004 struct cgroup_taskset *tset)
5005{ 5008{
5006 return 0; 5009 return 0;
5007} 5010}
5008static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 5011static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5009 struct cgroup_taskset *tset)
5010{ 5012{
5011} 5013}
5012static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 5014static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5013 struct cgroup_taskset *tset)
5014{ 5015{
5015} 5016}
5016#endif 5017#endif
@@ -5511,11 +5512,11 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
5511 * mem_cgroup_replace_page - migrate a charge to another page 5512 * mem_cgroup_replace_page - migrate a charge to another page
5512 * @oldpage: currently charged page 5513 * @oldpage: currently charged page
5513 * @newpage: page to transfer the charge to 5514 * @newpage: page to transfer the charge to
5514 * @lrucare: either or both pages might be on the LRU already
5515 * 5515 *
5516 * Migrate the charge from @oldpage to @newpage. 5516 * Migrate the charge from @oldpage to @newpage.
5517 * 5517 *
5518 * Both pages must be locked, @newpage->mapping must be set up. 5518 * Both pages must be locked, @newpage->mapping must be set up.
5519 * Either or both pages might be on the LRU already.
5519 */ 5520 */
5520void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) 5521void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
5521{ 5522{
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d13a33918fa2..c12680993ff3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -608,6 +608,8 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
608 continue; 608 continue;
609 if (unlikely(p->flags & PF_KTHREAD)) 609 if (unlikely(p->flags & PF_KTHREAD))
610 continue; 610 continue;
611 if (is_global_init(p))
612 continue;
611 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) 613 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
612 continue; 614 continue;
613 615
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3e4d65445fa7..d15d88c8efa1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2,7 +2,7 @@
2 * mm/page-writeback.c 2 * mm/page-writeback.c
3 * 3 *
4 * Copyright (C) 2002, Linus Torvalds. 4 * Copyright (C) 2002, Linus Torvalds.
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
6 * 6 *
7 * Contains functions related to writing back dirty pages at the 7 * Contains functions related to writing back dirty pages at the
8 * address_space level. 8 * address_space level.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17a3c66639a9..9d666df5ef95 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3647,8 +3647,9 @@ static void show_migration_types(unsigned char type)
3647{ 3647{
3648 static const char types[MIGRATE_TYPES] = { 3648 static const char types[MIGRATE_TYPES] = {
3649 [MIGRATE_UNMOVABLE] = 'U', 3649 [MIGRATE_UNMOVABLE] = 'U',
3650 [MIGRATE_RECLAIMABLE] = 'E',
3651 [MIGRATE_MOVABLE] = 'M', 3650 [MIGRATE_MOVABLE] = 'M',
3651 [MIGRATE_RECLAIMABLE] = 'E',
3652 [MIGRATE_HIGHATOMIC] = 'H',
3652#ifdef CONFIG_CMA 3653#ifdef CONFIG_CMA
3653 [MIGRATE_CMA] = 'C', 3654 [MIGRATE_CMA] = 'C',
3654#endif 3655#endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 9187eee4128b..2afcdbbdb685 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -843,14 +843,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
843 list_add_tail(&info->swaplist, &shmem_swaplist); 843 list_add_tail(&info->swaplist, &shmem_swaplist);
844 844
845 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 845 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
846 swap_shmem_alloc(swap);
847 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
848
849 spin_lock(&info->lock); 846 spin_lock(&info->lock);
850 info->swapped++;
851 shmem_recalc_inode(inode); 847 shmem_recalc_inode(inode);
848 info->swapped++;
852 spin_unlock(&info->lock); 849 spin_unlock(&info->lock);
853 850
851 swap_shmem_alloc(swap);
852 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
853
854 mutex_unlock(&shmem_swaplist_mutex); 854 mutex_unlock(&shmem_swaplist_mutex);
855 BUG_ON(page_mapped(page)); 855 BUG_ON(page_mapped(page));
856 swap_writepage(page, wbc); 856 swap_writepage(page, wbc);
@@ -1078,7 +1078,7 @@ repeat:
1078 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1078 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1079 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1079 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1080 error = -EINVAL; 1080 error = -EINVAL;
1081 goto failed; 1081 goto unlock;
1082 } 1082 }
1083 1083
1084 if (page && sgp == SGP_WRITE) 1084 if (page && sgp == SGP_WRITE)
@@ -1246,11 +1246,15 @@ clear:
1246 /* Perhaps the file has been truncated since we checked */ 1246 /* Perhaps the file has been truncated since we checked */
1247 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1247 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1248 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1248 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1249 if (alloced) {
1250 ClearPageDirty(page);
1251 delete_from_page_cache(page);
1252 spin_lock(&info->lock);
1253 shmem_recalc_inode(inode);
1254 spin_unlock(&info->lock);
1255 }
1249 error = -EINVAL; 1256 error = -EINVAL;
1250 if (alloced) 1257 goto unlock;
1251 goto trunc;
1252 else
1253 goto failed;
1254 } 1258 }
1255 *pagep = page; 1259 *pagep = page;
1256 return 0; 1260 return 0;
@@ -1258,23 +1262,13 @@ clear:
1258 /* 1262 /*
1259 * Error recovery. 1263 * Error recovery.
1260 */ 1264 */
1261trunc:
1262 info = SHMEM_I(inode);
1263 ClearPageDirty(page);
1264 delete_from_page_cache(page);
1265 spin_lock(&info->lock);
1266 info->alloced--;
1267 inode->i_blocks -= BLOCKS_PER_PAGE;
1268 spin_unlock(&info->lock);
1269decused: 1265decused:
1270 sbinfo = SHMEM_SB(inode->i_sb);
1271 if (sbinfo->max_blocks) 1266 if (sbinfo->max_blocks)
1272 percpu_counter_add(&sbinfo->used_blocks, -1); 1267 percpu_counter_add(&sbinfo->used_blocks, -1);
1273unacct: 1268unacct:
1274 shmem_unacct_blocks(info->flags, 1); 1269 shmem_unacct_blocks(info->flags, 1);
1275failed: 1270failed:
1276 if (swap.val && error != -EINVAL && 1271 if (swap.val && !shmem_confirm_swap(mapping, index, swap))
1277 !shmem_confirm_swap(mapping, index, swap))
1278 error = -EEXIST; 1272 error = -EEXIST;
1279unlock: 1273unlock:
1280 if (page) { 1274 if (page) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 879a2be23325..0d5712b0206c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,8 +921,8 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
921#ifdef CONFIG_PROC_FS 921#ifdef CONFIG_PROC_FS
922static char * const migratetype_names[MIGRATE_TYPES] = { 922static char * const migratetype_names[MIGRATE_TYPES] = {
923 "Unmovable", 923 "Unmovable",
924 "Reclaimable",
925 "Movable", 924 "Movable",
925 "Reclaimable",
926 "HighAtomic", 926 "HighAtomic",
927#ifdef CONFIG_CMA 927#ifdef CONFIG_CMA
928 "CMA", 928 "CMA",
@@ -1379,6 +1379,7 @@ static const struct file_operations proc_vmstat_file_operations = {
1379#endif /* CONFIG_PROC_FS */ 1379#endif /* CONFIG_PROC_FS */
1380 1380
1381#ifdef CONFIG_SMP 1381#ifdef CONFIG_SMP
1382static struct workqueue_struct *vmstat_wq;
1382static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 1383static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1383int sysctl_stat_interval __read_mostly = HZ; 1384int sysctl_stat_interval __read_mostly = HZ;
1384static cpumask_var_t cpu_stat_off; 1385static cpumask_var_t cpu_stat_off;
@@ -1391,7 +1392,7 @@ static void vmstat_update(struct work_struct *w)
1391 * to occur in the future. Keep on running the 1392 * to occur in the future. Keep on running the
1392 * update worker thread. 1393 * update worker thread.
1393 */ 1394 */
1394 schedule_delayed_work_on(smp_processor_id(), 1395 queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1395 this_cpu_ptr(&vmstat_work), 1396 this_cpu_ptr(&vmstat_work),
1396 round_jiffies_relative(sysctl_stat_interval)); 1397 round_jiffies_relative(sysctl_stat_interval));
1397 } else { 1398 } else {
@@ -1460,7 +1461,7 @@ static void vmstat_shepherd(struct work_struct *w)
1460 if (need_update(cpu) && 1461 if (need_update(cpu) &&
1461 cpumask_test_and_clear_cpu(cpu, cpu_stat_off)) 1462 cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1462 1463
1463 schedule_delayed_work_on(cpu, 1464 queue_delayed_work_on(cpu, vmstat_wq,
1464 &per_cpu(vmstat_work, cpu), 0); 1465 &per_cpu(vmstat_work, cpu), 0);
1465 1466
1466 put_online_cpus(); 1467 put_online_cpus();
@@ -1549,6 +1550,7 @@ static int __init setup_vmstat(void)
1549 1550
1550 start_shepherd_timer(); 1551 start_shepherd_timer();
1551 cpu_notifier_register_done(); 1552 cpu_notifier_register_done();
1553 vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1552#endif 1554#endif
1553#ifdef CONFIG_PROC_FS 1555#ifdef CONFIG_PROC_FS
1554 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); 1556 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
diff --git a/mm/zswap.c b/mm/zswap.c
index 025f8dc723de..bf14508afd64 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -541,6 +541,7 @@ static struct zswap_pool *zswap_pool_last_get(void)
541 return last; 541 return last;
542} 542}
543 543
544/* type and compressor must be null-terminated */
544static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) 545static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
545{ 546{
546 struct zswap_pool *pool; 547 struct zswap_pool *pool;
@@ -548,10 +549,9 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
548 assert_spin_locked(&zswap_pools_lock); 549 assert_spin_locked(&zswap_pools_lock);
549 550
550 list_for_each_entry_rcu(pool, &zswap_pools, list) { 551 list_for_each_entry_rcu(pool, &zswap_pools, list) {
551 if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name))) 552 if (strcmp(pool->tfm_name, compressor))
552 continue; 553 continue;
553 if (strncmp(zpool_get_type(pool->zpool), type, 554 if (strcmp(zpool_get_type(pool->zpool), type))
554 sizeof(zswap_zpool_type)))
555 continue; 555 continue;
556 /* if we can't get it, it's about to be destroyed */ 556 /* if we can't get it, it's about to be destroyed */
557 if (!zswap_pool_get(pool)) 557 if (!zswap_pool_get(pool))
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index ae3a47f9d1d5..fbd0acf80b13 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -805,6 +805,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
805 struct sock *sk; 805 struct sock *sk;
806 ax25_cb *ax25; 806 ax25_cb *ax25;
807 807
808 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
809 return -EINVAL;
810
808 if (!net_eq(net, &init_net)) 811 if (!net_eq(net, &init_net))
809 return -EAFNOSUPPORT; 812 return -EAFNOSUPPORT;
810 813
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 83bc1aaf5800..a49c705fb86b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -566,6 +566,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
566 int select; 566 int select;
567 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; 567 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
568 struct batadv_dat_candidate *res; 568 struct batadv_dat_candidate *res;
569 struct batadv_dat_entry dat;
569 570
570 if (!bat_priv->orig_hash) 571 if (!bat_priv->orig_hash)
571 return NULL; 572 return NULL;
@@ -575,7 +576,9 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
575 if (!res) 576 if (!res)
576 return NULL; 577 return NULL;
577 578
578 ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, 579 dat.ip = ip_dst;
580 dat.vid = 0;
581 ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
579 BATADV_DAT_ADDR_MAX); 582 BATADV_DAT_ADDR_MAX);
580 583
581 batadv_dbg(BATADV_DBG_DAT, bat_priv, 584 batadv_dbg(BATADV_DBG_DAT, bat_priv,
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 8d990b070a2e..3207667e69de 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -836,6 +836,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
836 u8 *orig_addr; 836 u8 *orig_addr;
837 struct batadv_orig_node *orig_node = NULL; 837 struct batadv_orig_node *orig_node = NULL;
838 int check, hdr_size = sizeof(*unicast_packet); 838 int check, hdr_size = sizeof(*unicast_packet);
839 enum batadv_subtype subtype;
839 bool is4addr; 840 bool is4addr;
840 841
841 unicast_packet = (struct batadv_unicast_packet *)skb->data; 842 unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -863,10 +864,20 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
863 /* packet for me */ 864 /* packet for me */
864 if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { 865 if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
865 if (is4addr) { 866 if (is4addr) {
866 batadv_dat_inc_counter(bat_priv, 867 subtype = unicast_4addr_packet->subtype;
867 unicast_4addr_packet->subtype); 868 batadv_dat_inc_counter(bat_priv, subtype);
868 orig_addr = unicast_4addr_packet->src; 869
869 orig_node = batadv_orig_hash_find(bat_priv, orig_addr); 870 /* Only payload data should be considered for speedy
871 * join. For example, DAT also uses unicast 4addr
872 * types, but those packets should not be considered
873 * for speedy join, since the clients do not actually
874 * reside at the sending originator.
875 */
876 if (subtype == BATADV_P_DATA) {
877 orig_addr = unicast_4addr_packet->src;
878 orig_node = batadv_orig_hash_find(bat_priv,
879 orig_addr);
880 }
870 } 881 }
871 882
872 if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, 883 if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 4228b10c47ea..76f19ba62462 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -68,13 +68,15 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
68 unsigned short vid, const char *message, 68 unsigned short vid, const char *message,
69 bool roaming); 69 bool roaming);
70 70
71/* returns 1 if they are the same mac addr */ 71/* returns 1 if they are the same mac addr and vid */
72static int batadv_compare_tt(const struct hlist_node *node, const void *data2) 72static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
73{ 73{
74 const void *data1 = container_of(node, struct batadv_tt_common_entry, 74 const void *data1 = container_of(node, struct batadv_tt_common_entry,
75 hash_entry); 75 hash_entry);
76 const struct batadv_tt_common_entry *tt1 = data1;
77 const struct batadv_tt_common_entry *tt2 = data2;
76 78
77 return batadv_compare_eth(data1, data2); 79 return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2);
78} 80}
79 81
80/** 82/**
@@ -1427,9 +1429,15 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1427 } 1429 }
1428 1430
1429 /* if the client was temporary added before receiving the first 1431 /* if the client was temporary added before receiving the first
1430 * OGM announcing it, we have to clear the TEMP flag 1432 * OGM announcing it, we have to clear the TEMP flag. Also,
1433 * remove the previous temporary orig node and re-add it
1434 * if required. If the orig entry changed, the new one which
1435 * is a non-temporary entry is preferred.
1431 */ 1436 */
1432 common->flags &= ~BATADV_TT_CLIENT_TEMP; 1437 if (common->flags & BATADV_TT_CLIENT_TEMP) {
1438 batadv_tt_global_del_orig_list(tt_global_entry);
1439 common->flags &= ~BATADV_TT_CLIENT_TEMP;
1440 }
1433 1441
1434 /* the change can carry possible "attribute" flags like the 1442 /* the change can carry possible "attribute" flags like the
1435 * TT_CLIENT_WIFI, therefore they have to be copied in the 1443 * TT_CLIENT_WIFI, therefore they have to be copied in the
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index fe129663bd3f..f52bcbf2e58c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -526,6 +526,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
526 if (!addr || addr->sa_family != AF_BLUETOOTH) 526 if (!addr || addr->sa_family != AF_BLUETOOTH)
527 return -EINVAL; 527 return -EINVAL;
528 528
529 if (addr_len < sizeof(struct sockaddr_sco))
530 return -EINVAL;
531
529 lock_sock(sk); 532 lock_sock(sk);
530 533
531 if (sk->sk_state != BT_OPEN) { 534 if (sk->sk_state != BT_OPEN) {
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 2e4df84c34a1..d9ee8d08a3a6 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -81,9 +81,11 @@ static void update_classid(struct cgroup_subsys_state *css, void *v)
81 css_task_iter_end(&it); 81 css_task_iter_end(&it);
82} 82}
83 83
84static void cgrp_attach(struct cgroup_subsys_state *css, 84static void cgrp_attach(struct cgroup_taskset *tset)
85 struct cgroup_taskset *tset)
86{ 85{
86 struct cgroup_subsys_state *css;
87
88 cgroup_taskset_first(tset, &css);
87 update_classid(css, 89 update_classid(css,
88 (void *)(unsigned long)css_cls_state(css)->classid); 90 (void *)(unsigned long)css_cls_state(css)->classid);
89} 91}
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index cbd0a199bf52..40fd09fe06ae 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -218,13 +218,14 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
218 return 0; 218 return 0;
219} 219}
220 220
221static void net_prio_attach(struct cgroup_subsys_state *css, 221static void net_prio_attach(struct cgroup_taskset *tset)
222 struct cgroup_taskset *tset)
223{ 222{
224 struct task_struct *p; 223 struct task_struct *p;
225 void *v = (void *)(unsigned long)css->cgroup->id; 224 struct cgroup_subsys_state *css;
225
226 cgroup_taskset_for_each(p, css, tset) {
227 void *v = (void *)(unsigned long)css->cgroup->id;
226 228
227 cgroup_taskset_for_each(p, tset) {
228 task_lock(p); 229 task_lock(p);
229 iterate_fd(p->files, 0, update_netprio, v); 230 iterate_fd(p->files, 0, update_netprio, v);
230 task_unlock(p); 231 task_unlock(p);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 152b9c70e252..b2df375ec9c2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3643,7 +3643,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3643 serr->ee.ee_info = tstype; 3643 serr->ee.ee_info = tstype;
3644 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3644 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3645 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3645 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3646 if (sk->sk_protocol == IPPROTO_TCP) 3646 if (sk->sk_protocol == IPPROTO_TCP &&
3647 sk->sk_type == SOCK_STREAM)
3647 serr->ee.ee_data -= sk->sk_tskey; 3648 serr->ee.ee_data -= sk->sk_tskey;
3648 } 3649 }
3649 3650
@@ -4268,7 +4269,7 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4268 return NULL; 4269 return NULL;
4269 } 4270 }
4270 4271
4271 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len, 4272 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
4272 2 * ETH_ALEN); 4273 2 * ETH_ALEN);
4273 skb->mac_header += VLAN_HLEN; 4274 skb->mac_header += VLAN_HLEN;
4274 return skb; 4275 return skb;
diff --git a/net/core/sock.c b/net/core/sock.c
index e31dfcee1729..0d91f7dca751 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -433,8 +433,6 @@ static bool sock_needs_netstamp(const struct sock *sk)
433 } 433 }
434} 434}
435 435
436#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
437
438static void sock_disable_timestamp(struct sock *sk, unsigned long flags) 436static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
439{ 437{
440 if (sk->sk_flags & flags) { 438 if (sk->sk_flags & flags) {
@@ -874,7 +872,8 @@ set_rcvbuf:
874 872
875 if (val & SOF_TIMESTAMPING_OPT_ID && 873 if (val & SOF_TIMESTAMPING_OPT_ID &&
876 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { 874 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
877 if (sk->sk_protocol == IPPROTO_TCP) { 875 if (sk->sk_protocol == IPPROTO_TCP &&
876 sk->sk_type == SOCK_STREAM) {
878 if (sk->sk_state != TCP_ESTABLISHED) { 877 if (sk->sk_state != TCP_ESTABLISHED) {
879 ret = -EINVAL; 878 ret = -EINVAL;
880 break; 879 break;
@@ -1552,7 +1551,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1552 */ 1551 */
1553 is_charged = sk_filter_charge(newsk, filter); 1552 is_charged = sk_filter_charge(newsk, filter);
1554 1553
1555 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) { 1554 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1556 /* It is still raw copy of parent, so invalidate 1555 /* It is still raw copy of parent, so invalidate
1557 * destructor and make plain sk_free() */ 1556 * destructor and make plain sk_free() */
1558 newsk->sk_destruct = NULL; 1557 newsk->sk_destruct = NULL;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index eebf5ac8ce18..13d6b1a6e0fc 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
678{ 678{
679 struct sock *sk; 679 struct sock *sk;
680 680
681 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
682 return -EINVAL;
683
681 if (!net_eq(net, &init_net)) 684 if (!net_eq(net, &init_net))
682 return -EAFNOSUPPORT; 685 return -EAFNOSUPPORT;
683 686
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 11c4ca13ec3b..5c5db6636704 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -257,6 +257,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
257 int try_loading_module = 0; 257 int try_loading_module = 0;
258 int err; 258 int err;
259 259
260 if (protocol < 0 || protocol >= IPPROTO_MAX)
261 return -EINVAL;
262
260 sock->state = SS_UNCONNECTED; 263 sock->state = SS_UNCONNECTED;
261 264
262 /* Look for the requested type/protocol pair. */ 265 /* Look for the requested type/protocol pair. */
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cc8f3e506cde..473447593060 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1155,6 +1155,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
1155static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1155static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1156{ 1156{
1157 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1157 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1158 struct netdev_notifier_changeupper_info *info;
1158 struct in_device *in_dev; 1159 struct in_device *in_dev;
1159 struct net *net = dev_net(dev); 1160 struct net *net = dev_net(dev);
1160 unsigned int flags; 1161 unsigned int flags;
@@ -1193,6 +1194,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
1193 case NETDEV_CHANGEMTU: 1194 case NETDEV_CHANGEMTU:
1194 rt_cache_flush(net); 1195 rt_cache_flush(net);
1195 break; 1196 break;
1197 case NETDEV_CHANGEUPPER:
1198 info = ptr;
1199 /* flush all routes if dev is linked to or unlinked from
1200 * an L3 master device (e.g., VRF)
1201 */
1202 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
1203 fib_disable_ip(dev, NETDEV_DOWN, true);
1204 break;
1196 } 1205 }
1197 return NOTIFY_DONE; 1206 return NOTIFY_DONE;
1198} 1207}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index e0fcbbbcfe54..bd903fe0f750 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -24,6 +24,7 @@ struct fou {
24 u16 type; 24 u16 type;
25 struct udp_offload udp_offloads; 25 struct udp_offload udp_offloads;
26 struct list_head list; 26 struct list_head list;
27 struct rcu_head rcu;
27}; 28};
28 29
29#define FOU_F_REMCSUM_NOPARTIAL BIT(0) 30#define FOU_F_REMCSUM_NOPARTIAL BIT(0)
@@ -417,7 +418,7 @@ static void fou_release(struct fou *fou)
417 list_del(&fou->list); 418 list_del(&fou->list);
418 udp_tunnel_sock_release(sock); 419 udp_tunnel_sock_release(sock);
419 420
420 kfree(fou); 421 kfree_rcu(fou, rcu);
421} 422}
422 423
423static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) 424static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index a35584176535..c187c60e3e0c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -60,6 +60,7 @@ config NFT_REJECT_IPV4
60 60
61config NFT_DUP_IPV4 61config NFT_DUP_IPV4
62 tristate "IPv4 nf_tables packet duplication support" 62 tristate "IPv4 nf_tables packet duplication support"
63 depends on !NF_CONNTRACK || NF_CONNTRACK
63 select NF_DUP_IPV4 64 select NF_DUP_IPV4
64 help 65 help
65 This module enables IPv4 packet duplication support for nf_tables. 66 This module enables IPv4 packet duplication support for nf_tables.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index db003438aaf5..d8841a2f1569 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1493,7 +1493,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1493 if (likely(sk->sk_rx_dst)) 1493 if (likely(sk->sk_rx_dst))
1494 skb_dst_drop(skb); 1494 skb_dst_drop(skb);
1495 else 1495 else
1496 skb_dst_force(skb); 1496 skb_dst_force_safe(skb);
1497 1497
1498 __skb_queue_tail(&tp->ucopy.prequeue, skb); 1498 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1499 tp->ucopy.memory += skb->truesize; 1499 tp->ucopy.memory += skb->truesize;
@@ -1721,8 +1721,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1721{ 1721{
1722 struct dst_entry *dst = skb_dst(skb); 1722 struct dst_entry *dst = skb_dst(skb);
1723 1723
1724 if (dst) { 1724 if (dst && dst_hold_safe(dst)) {
1725 dst_hold(dst);
1726 sk->sk_rx_dst = dst; 1725 sk->sk_rx_dst = dst;
1727 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; 1726 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1728 } 1727 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cb7ca569052c..9bfc39ff2285 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3150,7 +3150,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3150{ 3150{
3151 struct tcp_sock *tp = tcp_sk(sk); 3151 struct tcp_sock *tp = tcp_sk(sk);
3152 struct tcp_fastopen_request *fo = tp->fastopen_req; 3152 struct tcp_fastopen_request *fo = tp->fastopen_req;
3153 int syn_loss = 0, space, err = 0, copied; 3153 int syn_loss = 0, space, err = 0;
3154 unsigned long last_syn_loss = 0; 3154 unsigned long last_syn_loss = 0;
3155 struct sk_buff *syn_data; 3155 struct sk_buff *syn_data;
3156 3156
@@ -3188,17 +3188,18 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3188 goto fallback; 3188 goto fallback;
3189 syn_data->ip_summed = CHECKSUM_PARTIAL; 3189 syn_data->ip_summed = CHECKSUM_PARTIAL;
3190 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); 3190 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
3191 copied = copy_from_iter(skb_put(syn_data, space), space, 3191 if (space) {
3192 &fo->data->msg_iter); 3192 int copied = copy_from_iter(skb_put(syn_data, space), space,
3193 if (unlikely(!copied)) { 3193 &fo->data->msg_iter);
3194 kfree_skb(syn_data); 3194 if (unlikely(!copied)) {
3195 goto fallback; 3195 kfree_skb(syn_data);
3196 } 3196 goto fallback;
3197 if (copied != space) { 3197 }
3198 skb_trim(syn_data, copied); 3198 if (copied != space) {
3199 space = copied; 3199 skb_trim(syn_data, copied);
3200 space = copied;
3201 }
3200 } 3202 }
3201
3202 /* No more data pending in inet_wait_for_connect() */ 3203 /* No more data pending in inet_wait_for_connect() */
3203 if (space == fo->size) 3204 if (space == fo->size)
3204 fo->data = NULL; 3205 fo->data = NULL;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 61f26851655c..17f8e7ea133b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -350,6 +350,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
350 setup_timer(&ndev->rs_timer, addrconf_rs_timer, 350 setup_timer(&ndev->rs_timer, addrconf_rs_timer,
351 (unsigned long)ndev); 351 (unsigned long)ndev);
352 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); 352 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
353
354 if (ndev->cnf.stable_secret.initialized)
355 ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
356 else
357 ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
358
353 ndev->cnf.mtu6 = dev->mtu; 359 ndev->cnf.mtu6 = dev->mtu;
354 ndev->cnf.sysctl = NULL; 360 ndev->cnf.sysctl = NULL;
355 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); 361 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -2455,7 +2461,7 @@ ok:
2455#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 2461#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2456 if (in6_dev->cnf.optimistic_dad && 2462 if (in6_dev->cnf.optimistic_dad &&
2457 !net->ipv6.devconf_all->forwarding && sllao) 2463 !net->ipv6.devconf_all->forwarding && sllao)
2458 addr_flags = IFA_F_OPTIMISTIC; 2464 addr_flags |= IFA_F_OPTIMISTIC;
2459#endif 2465#endif
2460 2466
2461 /* Do not allow to create too much of autoconfigured 2467 /* Do not allow to create too much of autoconfigured
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 8ec0df75f1c4..9f5137cd604e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
109 int try_loading_module = 0; 109 int try_loading_module = 0;
110 int err; 110 int err;
111 111
112 if (protocol < 0 || protocol >= IPPROTO_MAX)
113 return -EINVAL;
114
112 /* Look for the requested type/protocol pair. */ 115 /* Look for the requested type/protocol pair. */
113lookup_protocol: 116lookup_protocol:
114 err = -ESOCKTNOSUPPORT; 117 err = -ESOCKTNOSUPPORT;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3c7b9310b33f..e5ea177d34c6 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1571,13 +1571,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
1571 return -EEXIST; 1571 return -EEXIST;
1572 } else { 1572 } else {
1573 t = nt; 1573 t = nt;
1574
1575 ip6gre_tunnel_unlink(ign, t);
1576 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
1577 ip6gre_tunnel_link(ign, t);
1578 netdev_state_change(dev);
1579 } 1574 }
1580 1575
1576 ip6gre_tunnel_unlink(ign, t);
1577 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
1578 ip6gre_tunnel_link(ign, t);
1581 return 0; 1579 return 0;
1582} 1580}
1583 1581
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index f6a024e141e5..e10a04c9cdc7 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -49,6 +49,7 @@ config NFT_REJECT_IPV6
49 49
50config NFT_DUP_IPV6 50config NFT_DUP_IPV6
51 tristate "IPv6 nf_tables packet duplication support" 51 tristate "IPv6 nf_tables packet duplication support"
52 depends on !NF_CONNTRACK || NF_CONNTRACK
52 select NF_DUP_IPV6 53 select NF_DUP_IPV6
53 help 54 help
54 This module enables IPv6 packet duplication support for nf_tables. 55 This module enables IPv6 packet duplication support for nf_tables.
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e7aab561b7b4..6b8a8a9091fa 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93{ 93{
94 struct dst_entry *dst = skb_dst(skb); 94 struct dst_entry *dst = skb_dst(skb);
95 95
96 if (dst) { 96 if (dst && dst_hold_safe(dst)) {
97 const struct rt6_info *rt = (const struct rt6_info *)dst; 97 const struct rt6_info *rt = (const struct rt6_info *)dst;
98 98
99 dst_hold(dst);
100 sk->sk_rx_dst = dst; 99 sk->sk_rx_dst = dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; 100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); 101 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index e6aa48b5395c..923abd6b3064 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1086,6 +1086,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
1086 struct sock *sk; 1086 struct sock *sk;
1087 struct irda_sock *self; 1087 struct irda_sock *self;
1088 1088
1089 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
1090 return -EINVAL;
1091
1089 if (net != &init_net) 1092 if (net != &init_net)
1090 return -EAFNOSUPPORT; 1093 return -EAFNOSUPPORT;
1091 1094
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index da471eef07bb..c12f348138ac 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1169,8 +1169,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1169 * rc isn't initialized here yet, so ignore it 1169 * rc isn't initialized here yet, so ignore it
1170 */ 1170 */
1171 __ieee80211_vht_handle_opmode(sdata, sta, 1171 __ieee80211_vht_handle_opmode(sdata, sta,
1172 params->opmode_notif, 1172 params->opmode_notif, band);
1173 band, false);
1174 } 1173 }
1175 1174
1176 if (ieee80211_vif_is_mesh(&sdata->vif)) 1175 if (ieee80211_vif_is_mesh(&sdata->vif))
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d832bd59236b..5322b4c71630 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1709,10 +1709,10 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
1709void ieee80211_sta_set_rx_nss(struct sta_info *sta); 1709void ieee80211_sta_set_rx_nss(struct sta_info *sta);
1710u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 1710u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
1711 struct sta_info *sta, u8 opmode, 1711 struct sta_info *sta, u8 opmode,
1712 enum ieee80211_band band, bool nss_only); 1712 enum ieee80211_band band);
1713void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 1713void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
1714 struct sta_info *sta, u8 opmode, 1714 struct sta_info *sta, u8 opmode,
1715 enum ieee80211_band band, bool nss_only); 1715 enum ieee80211_band band);
1716void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, 1716void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
1717 struct ieee80211_sta_vht_cap *vht_cap); 1717 struct ieee80211_sta_vht_cap *vht_cap);
1718void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, 1718void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b140cc6651f4..3aa04344942b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1379,21 +1379,26 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
1379 */ 1379 */
1380 if (has_80211h_pwr && 1380 if (has_80211h_pwr &&
1381 (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { 1381 (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) {
1382 new_ap_level = pwr_level_80211h;
1383
1384 if (sdata->ap_power_level == new_ap_level)
1385 return 0;
1386
1382 sdata_dbg(sdata, 1387 sdata_dbg(sdata,
1383 "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", 1388 "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
1384 pwr_level_80211h, chan_pwr, pwr_reduction_80211h, 1389 pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
1385 sdata->u.mgd.bssid); 1390 sdata->u.mgd.bssid);
1386 new_ap_level = pwr_level_80211h;
1387 } else { /* has_cisco_pwr is always true here. */ 1391 } else { /* has_cisco_pwr is always true here. */
1392 new_ap_level = pwr_level_cisco;
1393
1394 if (sdata->ap_power_level == new_ap_level)
1395 return 0;
1396
1388 sdata_dbg(sdata, 1397 sdata_dbg(sdata,
1389 "Limiting TX power to %d dBm as advertised by %pM\n", 1398 "Limiting TX power to %d dBm as advertised by %pM\n",
1390 pwr_level_cisco, sdata->u.mgd.bssid); 1399 pwr_level_cisco, sdata->u.mgd.bssid);
1391 new_ap_level = pwr_level_cisco;
1392 } 1400 }
1393 1401
1394 if (sdata->ap_power_level == new_ap_level)
1395 return 0;
1396
1397 sdata->ap_power_level = new_ap_level; 1402 sdata->ap_power_level = new_ap_level;
1398 if (__ieee80211_recalc_txpower(sdata)) 1403 if (__ieee80211_recalc_txpower(sdata))
1399 return BSS_CHANGED_TXPOWER; 1404 return BSS_CHANGED_TXPOWER;
@@ -3575,7 +3580,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
3575 3580
3576 if (sta && elems.opmode_notif) 3581 if (sta && elems.opmode_notif)
3577 ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, 3582 ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif,
3578 rx_status->band, true); 3583 rx_status->band);
3579 mutex_unlock(&local->sta_mtx); 3584 mutex_unlock(&local->sta_mtx);
3580 3585
3581 changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, 3586 changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8bae5de0dc44..82af407fea7a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2736,8 +2736,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2736 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; 2736 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
2737 2737
2738 ieee80211_vht_handle_opmode(rx->sdata, rx->sta, 2738 ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
2739 opmode, status->band, 2739 opmode, status->band);
2740 false);
2741 goto handled; 2740 goto handled;
2742 } 2741 }
2743 default: 2742 default:
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 74058020b7d6..33344f5a66a8 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1641,6 +1641,29 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1641 drv_stop(local); 1641 drv_stop(local);
1642} 1642}
1643 1643
1644static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
1645 bool aborted)
1646{
1647 /* It's possible that we don't handle the scan completion in
1648 * time during suspend, so if it's still marked as completed
1649 * here, queue the work and flush it to clean things up.
1650 * Instead of calling the worker function directly here, we
1651 * really queue it to avoid potential races with other flows
1652 * scheduling the same work.
1653 */
1654 if (test_bit(SCAN_COMPLETED, &local->scanning)) {
1655 /* If coming from reconfiguration failure, abort the scan so
1656 * we don't attempt to continue a partial HW scan - which is
1657 * possible otherwise if (e.g.) the 2.4 GHz portion was the
1658 * completed scan, and a 5 GHz portion is still pending.
1659 */
1660 if (aborted)
1661 set_bit(SCAN_ABORTED, &local->scanning);
1662 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
1663 flush_delayed_work(&local->scan_work);
1664 }
1665}
1666
1644static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) 1667static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
1645{ 1668{
1646 struct ieee80211_sub_if_data *sdata; 1669 struct ieee80211_sub_if_data *sdata;
@@ -1660,6 +1683,8 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
1660 local->suspended = false; 1683 local->suspended = false;
1661 local->in_reconfig = false; 1684 local->in_reconfig = false;
1662 1685
1686 ieee80211_flush_completed_scan(local, true);
1687
1663 /* scheduled scan clearly can't be running any more, but tell 1688 /* scheduled scan clearly can't be running any more, but tell
1664 * cfg80211 and clear local state 1689 * cfg80211 and clear local state
1665 */ 1690 */
@@ -1698,6 +1723,27 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
1698 mutex_unlock(&local->chanctx_mtx); 1723 mutex_unlock(&local->chanctx_mtx);
1699} 1724}
1700 1725
1726static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata)
1727{
1728 struct ieee80211_local *local = sdata->local;
1729 struct sta_info *sta;
1730
1731 /* add STAs back */
1732 mutex_lock(&local->sta_mtx);
1733 list_for_each_entry(sta, &local->sta_list, list) {
1734 enum ieee80211_sta_state state;
1735
1736 if (!sta->uploaded || sta->sdata != sdata)
1737 continue;
1738
1739 for (state = IEEE80211_STA_NOTEXIST;
1740 state < sta->sta_state; state++)
1741 WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
1742 state + 1));
1743 }
1744 mutex_unlock(&local->sta_mtx);
1745}
1746
1701int ieee80211_reconfig(struct ieee80211_local *local) 1747int ieee80211_reconfig(struct ieee80211_local *local)
1702{ 1748{
1703 struct ieee80211_hw *hw = &local->hw; 1749 struct ieee80211_hw *hw = &local->hw;
@@ -1833,50 +1879,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1833 WARN_ON(drv_add_chanctx(local, ctx)); 1879 WARN_ON(drv_add_chanctx(local, ctx));
1834 mutex_unlock(&local->chanctx_mtx); 1880 mutex_unlock(&local->chanctx_mtx);
1835 1881
1836 list_for_each_entry(sdata, &local->interfaces, list) {
1837 if (!ieee80211_sdata_running(sdata))
1838 continue;
1839 ieee80211_assign_chanctx(local, sdata);
1840 }
1841
1842 sdata = rtnl_dereference(local->monitor_sdata); 1882 sdata = rtnl_dereference(local->monitor_sdata);
1843 if (sdata && ieee80211_sdata_running(sdata)) 1883 if (sdata && ieee80211_sdata_running(sdata))
1844 ieee80211_assign_chanctx(local, sdata); 1884 ieee80211_assign_chanctx(local, sdata);
1845 } 1885 }
1846 1886
1847 /* add STAs back */
1848 mutex_lock(&local->sta_mtx);
1849 list_for_each_entry(sta, &local->sta_list, list) {
1850 enum ieee80211_sta_state state;
1851
1852 if (!sta->uploaded)
1853 continue;
1854
1855 /* AP-mode stations will be added later */
1856 if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
1857 continue;
1858
1859 for (state = IEEE80211_STA_NOTEXIST;
1860 state < sta->sta_state; state++)
1861 WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
1862 state + 1));
1863 }
1864 mutex_unlock(&local->sta_mtx);
1865
1866 /* reconfigure tx conf */
1867 if (hw->queues >= IEEE80211_NUM_ACS) {
1868 list_for_each_entry(sdata, &local->interfaces, list) {
1869 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1870 sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1871 !ieee80211_sdata_running(sdata))
1872 continue;
1873
1874 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1875 drv_conf_tx(local, sdata, i,
1876 &sdata->tx_conf[i]);
1877 }
1878 }
1879
1880 /* reconfigure hardware */ 1887 /* reconfigure hardware */
1881 ieee80211_hw_config(local, ~0); 1888 ieee80211_hw_config(local, ~0);
1882 1889
@@ -1889,6 +1896,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1889 if (!ieee80211_sdata_running(sdata)) 1896 if (!ieee80211_sdata_running(sdata))
1890 continue; 1897 continue;
1891 1898
1899 ieee80211_assign_chanctx(local, sdata);
1900
1901 switch (sdata->vif.type) {
1902 case NL80211_IFTYPE_AP_VLAN:
1903 case NL80211_IFTYPE_MONITOR:
1904 break;
1905 default:
1906 ieee80211_reconfig_stations(sdata);
1907 /* fall through */
1908 case NL80211_IFTYPE_AP: /* AP stations are handled later */
1909 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1910 drv_conf_tx(local, sdata, i,
1911 &sdata->tx_conf[i]);
1912 break;
1913 }
1914
1892 /* common change flags for all interface types */ 1915 /* common change flags for all interface types */
1893 changed = BSS_CHANGED_ERP_CTS_PROT | 1916 changed = BSS_CHANGED_ERP_CTS_PROT |
1894 BSS_CHANGED_ERP_PREAMBLE | 1917 BSS_CHANGED_ERP_PREAMBLE |
@@ -2074,17 +2097,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2074 mb(); 2097 mb();
2075 local->resuming = false; 2098 local->resuming = false;
2076 2099
2077 /* It's possible that we don't handle the scan completion in 2100 ieee80211_flush_completed_scan(local, false);
2078 * time during suspend, so if it's still marked as completed
2079 * here, queue the work and flush it to clean things up.
2080 * Instead of calling the worker function directly here, we
2081 * really queue it to avoid potential races with other flows
2082 * scheduling the same work.
2083 */
2084 if (test_bit(SCAN_COMPLETED, &local->scanning)) {
2085 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
2086 flush_delayed_work(&local->scan_work);
2087 }
2088 2101
2089 if (local->open_count && !reconfig_due_to_wowlan) 2102 if (local->open_count && !reconfig_due_to_wowlan)
2090 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); 2103 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ff1c798921a6..c38b2f07a919 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -378,7 +378,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
378 378
379u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 379u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
380 struct sta_info *sta, u8 opmode, 380 struct sta_info *sta, u8 opmode,
381 enum ieee80211_band band, bool nss_only) 381 enum ieee80211_band band)
382{ 382{
383 struct ieee80211_local *local = sdata->local; 383 struct ieee80211_local *local = sdata->local;
384 struct ieee80211_supported_band *sband; 384 struct ieee80211_supported_band *sband;
@@ -401,9 +401,6 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
401 changed |= IEEE80211_RC_NSS_CHANGED; 401 changed |= IEEE80211_RC_NSS_CHANGED;
402 } 402 }
403 403
404 if (nss_only)
405 return changed;
406
407 switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { 404 switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
408 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 405 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
409 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; 406 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
@@ -430,13 +427,12 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
430 427
431void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 428void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
432 struct sta_info *sta, u8 opmode, 429 struct sta_info *sta, u8 opmode,
433 enum ieee80211_band band, bool nss_only) 430 enum ieee80211_band band)
434{ 431{
435 struct ieee80211_local *local = sdata->local; 432 struct ieee80211_local *local = sdata->local;
436 struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; 433 struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
437 434
438 u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, 435 u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
439 band, nss_only);
440 436
441 if (changed > 0) 437 if (changed > 0)
442 rate_control_rate_update(local, sband, sta, changed); 438 rate_control_rate_update(local, sband, sta, changed);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index c70d750148b6..c32fc411a911 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -27,6 +27,8 @@
27 */ 27 */
28#define MAX_MP_SELECT_LABELS 4 28#define MAX_MP_SELECT_LABELS 4
29 29
30#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
31
30static int zero = 0; 32static int zero = 0;
31static int label_limit = (1 << 20) - 1; 33static int label_limit = (1 << 20) - 1;
32 34
@@ -317,7 +319,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
317 } 319 }
318 } 320 }
319 321
320 err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb); 322 /* If via wasn't specified then send out using device address */
323 if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
324 err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
325 out_dev->dev_addr, skb);
326 else
327 err = neigh_xmit(nh->nh_via_table, out_dev,
328 mpls_nh_via(rt, nh), skb);
321 if (err) 329 if (err)
322 net_dbg_ratelimited("%s: packet transmission failed: %d\n", 330 net_dbg_ratelimited("%s: packet transmission failed: %d\n",
323 __func__, err); 331 __func__, err);
@@ -534,6 +542,10 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
534 if (!mpls_dev_get(dev)) 542 if (!mpls_dev_get(dev))
535 goto errout; 543 goto errout;
536 544
545 if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
546 (dev->addr_len != nh->nh_via_alen))
547 goto errout;
548
537 RCU_INIT_POINTER(nh->nh_dev, dev); 549 RCU_INIT_POINTER(nh->nh_dev, dev);
538 550
539 return 0; 551 return 0;
@@ -592,10 +604,14 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt,
592 goto errout; 604 goto errout;
593 } 605 }
594 606
595 err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, 607 if (via) {
596 __mpls_nh_via(rt, nh)); 608 err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
597 if (err) 609 __mpls_nh_via(rt, nh));
598 goto errout; 610 if (err)
611 goto errout;
612 } else {
613 nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
614 }
599 615
600 err = mpls_nh_assign_dev(net, rt, nh, oif); 616 err = mpls_nh_assign_dev(net, rt, nh, oif);
601 if (err) 617 if (err)
@@ -677,9 +693,6 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
677 nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST); 693 nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
678 } 694 }
679 695
680 if (!nla_via)
681 goto errout;
682
683 err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, 696 err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
684 rtnh->rtnh_ifindex, nla_via, 697 rtnh->rtnh_ifindex, nla_via,
685 nla_newdst); 698 nla_newdst);
@@ -1118,6 +1131,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1118 1131
1119 cfg->rc_label = LABEL_NOT_SPECIFIED; 1132 cfg->rc_label = LABEL_NOT_SPECIFIED;
1120 cfg->rc_protocol = rtm->rtm_protocol; 1133 cfg->rc_protocol = rtm->rtm_protocol;
1134 cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
1121 cfg->rc_nlflags = nlh->nlmsg_flags; 1135 cfg->rc_nlflags = nlh->nlmsg_flags;
1122 cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; 1136 cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
1123 cfg->rc_nlinfo.nlh = nlh; 1137 cfg->rc_nlinfo.nlh = nlh;
@@ -1231,7 +1245,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
1231 nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, 1245 nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
1232 nh->nh_label)) 1246 nh->nh_label))
1233 goto nla_put_failure; 1247 goto nla_put_failure;
1234 if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), 1248 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
1249 nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
1235 nh->nh_via_alen)) 1250 nh->nh_via_alen))
1236 goto nla_put_failure; 1251 goto nla_put_failure;
1237 dev = rtnl_dereference(nh->nh_dev); 1252 dev = rtnl_dereference(nh->nh_dev);
@@ -1257,7 +1272,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
1257 nh->nh_labels, 1272 nh->nh_labels,
1258 nh->nh_label)) 1273 nh->nh_label))
1259 goto nla_put_failure; 1274 goto nla_put_failure;
1260 if (nla_put_via(skb, nh->nh_via_table, 1275 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
1276 nla_put_via(skb, nh->nh_via_table,
1261 mpls_nh_via(rt, nh), 1277 mpls_nh_via(rt, nh),
1262 nh->nh_via_alen)) 1278 nh->nh_via_alen))
1263 goto nla_put_failure; 1279 goto nla_put_failure;
@@ -1319,7 +1335,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
1319 1335
1320 if (nh->nh_dev) 1336 if (nh->nh_dev)
1321 payload += nla_total_size(4); /* RTA_OIF */ 1337 payload += nla_total_size(4); /* RTA_OIF */
1322 payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */ 1338 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
1339 payload += nla_total_size(2 + nh->nh_via_alen);
1323 if (nh->nh_labels) /* RTA_NEWDST */ 1340 if (nh->nh_labels) /* RTA_NEWDST */
1324 payload += nla_total_size(nh->nh_labels * 4); 1341 payload += nla_total_size(nh->nh_labels * 4);
1325 } else { 1342 } else {
@@ -1328,7 +1345,9 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
1328 1345
1329 for_nexthops(rt) { 1346 for_nexthops(rt) {
1330 nhsize += nla_total_size(sizeof(struct rtnexthop)); 1347 nhsize += nla_total_size(sizeof(struct rtnexthop));
1331 nhsize += nla_total_size(2 + nh->nh_via_alen); 1348 /* RTA_VIA */
1349 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
1350 nhsize += nla_total_size(2 + nh->nh_via_alen);
1332 if (nh->nh_labels) 1351 if (nh->nh_labels)
1333 nhsize += nla_total_size(nh->nh_labels * 4); 1352 nhsize += nla_total_size(nh->nh_labels * 4);
1334 } endfor_nexthops(rt); 1353 } endfor_nexthops(rt);
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 67591aef9cae..64afd3d0b144 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -54,10 +54,10 @@ int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
54 unsigned int ttl; 54 unsigned int ttl;
55 55
56 /* Obtain the ttl */ 56 /* Obtain the ttl */
57 if (skb->protocol == htons(ETH_P_IP)) { 57 if (dst->ops->family == AF_INET) {
58 ttl = ip_hdr(skb)->ttl; 58 ttl = ip_hdr(skb)->ttl;
59 rt = (struct rtable *)dst; 59 rt = (struct rtable *)dst;
60 } else if (skb->protocol == htons(ETH_P_IPV6)) { 60 } else if (dst->ops->family == AF_INET6) {
61 ttl = ipv6_hdr(skb)->hop_limit; 61 ttl = ipv6_hdr(skb)->hop_limit;
62 rt6 = (struct rt6_info *)dst; 62 rt6 = (struct rt6_info *)dst;
63 } else { 63 } else {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 93cc4737018f..2cb429d34c03 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -89,6 +89,7 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
89} 89}
90 90
91static void nft_ctx_init(struct nft_ctx *ctx, 91static void nft_ctx_init(struct nft_ctx *ctx,
92 struct net *net,
92 const struct sk_buff *skb, 93 const struct sk_buff *skb,
93 const struct nlmsghdr *nlh, 94 const struct nlmsghdr *nlh,
94 struct nft_af_info *afi, 95 struct nft_af_info *afi,
@@ -96,7 +97,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
96 struct nft_chain *chain, 97 struct nft_chain *chain,
97 const struct nlattr * const *nla) 98 const struct nlattr * const *nla)
98{ 99{
99 ctx->net = sock_net(skb->sk); 100 ctx->net = net;
100 ctx->afi = afi; 101 ctx->afi = afi;
101 ctx->table = table; 102 ctx->table = table;
102 ctx->chain = chain; 103 ctx->chain = chain;
@@ -672,15 +673,14 @@ err:
672 return ret; 673 return ret;
673} 674}
674 675
675static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, 676static int nf_tables_newtable(struct net *net, struct sock *nlsk,
676 const struct nlmsghdr *nlh, 677 struct sk_buff *skb, const struct nlmsghdr *nlh,
677 const struct nlattr * const nla[]) 678 const struct nlattr * const nla[])
678{ 679{
679 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 680 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
680 const struct nlattr *name; 681 const struct nlattr *name;
681 struct nft_af_info *afi; 682 struct nft_af_info *afi;
682 struct nft_table *table; 683 struct nft_table *table;
683 struct net *net = sock_net(skb->sk);
684 int family = nfmsg->nfgen_family; 684 int family = nfmsg->nfgen_family;
685 u32 flags = 0; 685 u32 flags = 0;
686 struct nft_ctx ctx; 686 struct nft_ctx ctx;
@@ -706,7 +706,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
706 if (nlh->nlmsg_flags & NLM_F_REPLACE) 706 if (nlh->nlmsg_flags & NLM_F_REPLACE)
707 return -EOPNOTSUPP; 707 return -EOPNOTSUPP;
708 708
709 nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); 709 nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
710 return nf_tables_updtable(&ctx); 710 return nf_tables_updtable(&ctx);
711 } 711 }
712 712
@@ -730,7 +730,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
730 INIT_LIST_HEAD(&table->sets); 730 INIT_LIST_HEAD(&table->sets);
731 table->flags = flags; 731 table->flags = flags;
732 732
733 nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); 733 nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
734 err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); 734 err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
735 if (err < 0) 735 if (err < 0)
736 goto err3; 736 goto err3;
@@ -810,18 +810,17 @@ out:
810 return err; 810 return err;
811} 811}
812 812
813static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, 813static int nf_tables_deltable(struct net *net, struct sock *nlsk,
814 const struct nlmsghdr *nlh, 814 struct sk_buff *skb, const struct nlmsghdr *nlh,
815 const struct nlattr * const nla[]) 815 const struct nlattr * const nla[])
816{ 816{
817 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 817 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
818 struct nft_af_info *afi; 818 struct nft_af_info *afi;
819 struct nft_table *table; 819 struct nft_table *table;
820 struct net *net = sock_net(skb->sk);
821 int family = nfmsg->nfgen_family; 820 int family = nfmsg->nfgen_family;
822 struct nft_ctx ctx; 821 struct nft_ctx ctx;
823 822
824 nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla); 823 nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla);
825 if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) 824 if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
826 return nft_flush(&ctx, family); 825 return nft_flush(&ctx, family);
827 826
@@ -1221,8 +1220,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
1221 } 1220 }
1222} 1221}
1223 1222
1224static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, 1223static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1225 const struct nlmsghdr *nlh, 1224 struct sk_buff *skb, const struct nlmsghdr *nlh,
1226 const struct nlattr * const nla[]) 1225 const struct nlattr * const nla[])
1227{ 1226{
1228 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1227 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -1232,7 +1231,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1232 struct nft_chain *chain; 1231 struct nft_chain *chain;
1233 struct nft_base_chain *basechain = NULL; 1232 struct nft_base_chain *basechain = NULL;
1234 struct nlattr *ha[NFTA_HOOK_MAX + 1]; 1233 struct nlattr *ha[NFTA_HOOK_MAX + 1];
1235 struct net *net = sock_net(skb->sk);
1236 int family = nfmsg->nfgen_family; 1234 int family = nfmsg->nfgen_family;
1237 struct net_device *dev = NULL; 1235 struct net_device *dev = NULL;
1238 u8 policy = NF_ACCEPT; 1236 u8 policy = NF_ACCEPT;
@@ -1313,7 +1311,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1313 return PTR_ERR(stats); 1311 return PTR_ERR(stats);
1314 } 1312 }
1315 1313
1316 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1314 nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
1317 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, 1315 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
1318 sizeof(struct nft_trans_chain)); 1316 sizeof(struct nft_trans_chain));
1319 if (trans == NULL) { 1317 if (trans == NULL) {
@@ -1461,7 +1459,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1461 if (err < 0) 1459 if (err < 0)
1462 goto err1; 1460 goto err1;
1463 1461
1464 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1462 nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
1465 err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN); 1463 err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
1466 if (err < 0) 1464 if (err < 0)
1467 goto err2; 1465 goto err2;
@@ -1476,15 +1474,14 @@ err1:
1476 return err; 1474 return err;
1477} 1475}
1478 1476
1479static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, 1477static int nf_tables_delchain(struct net *net, struct sock *nlsk,
1480 const struct nlmsghdr *nlh, 1478 struct sk_buff *skb, const struct nlmsghdr *nlh,
1481 const struct nlattr * const nla[]) 1479 const struct nlattr * const nla[])
1482{ 1480{
1483 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1481 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1484 struct nft_af_info *afi; 1482 struct nft_af_info *afi;
1485 struct nft_table *table; 1483 struct nft_table *table;
1486 struct nft_chain *chain; 1484 struct nft_chain *chain;
1487 struct net *net = sock_net(skb->sk);
1488 int family = nfmsg->nfgen_family; 1485 int family = nfmsg->nfgen_family;
1489 struct nft_ctx ctx; 1486 struct nft_ctx ctx;
1490 1487
@@ -1506,7 +1503,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1506 if (chain->use > 0) 1503 if (chain->use > 0)
1507 return -EBUSY; 1504 return -EBUSY;
1508 1505
1509 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1506 nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
1510 1507
1511 return nft_delchain(&ctx); 1508 return nft_delchain(&ctx);
1512} 1509}
@@ -2010,13 +2007,12 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2010 2007
2011static struct nft_expr_info *info; 2008static struct nft_expr_info *info;
2012 2009
2013static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, 2010static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2014 const struct nlmsghdr *nlh, 2011 struct sk_buff *skb, const struct nlmsghdr *nlh,
2015 const struct nlattr * const nla[]) 2012 const struct nlattr * const nla[])
2016{ 2013{
2017 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2014 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2018 struct nft_af_info *afi; 2015 struct nft_af_info *afi;
2019 struct net *net = sock_net(skb->sk);
2020 struct nft_table *table; 2016 struct nft_table *table;
2021 struct nft_chain *chain; 2017 struct nft_chain *chain;
2022 struct nft_rule *rule, *old_rule = NULL; 2018 struct nft_rule *rule, *old_rule = NULL;
@@ -2075,7 +2071,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
2075 return PTR_ERR(old_rule); 2071 return PTR_ERR(old_rule);
2076 } 2072 }
2077 2073
2078 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 2074 nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
2079 2075
2080 n = 0; 2076 n = 0;
2081 size = 0; 2077 size = 0;
@@ -2176,13 +2172,12 @@ err1:
2176 return err; 2172 return err;
2177} 2173}
2178 2174
2179static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, 2175static int nf_tables_delrule(struct net *net, struct sock *nlsk,
2180 const struct nlmsghdr *nlh, 2176 struct sk_buff *skb, const struct nlmsghdr *nlh,
2181 const struct nlattr * const nla[]) 2177 const struct nlattr * const nla[])
2182{ 2178{
2183 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2179 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2184 struct nft_af_info *afi; 2180 struct nft_af_info *afi;
2185 struct net *net = sock_net(skb->sk);
2186 struct nft_table *table; 2181 struct nft_table *table;
2187 struct nft_chain *chain = NULL; 2182 struct nft_chain *chain = NULL;
2188 struct nft_rule *rule; 2183 struct nft_rule *rule;
@@ -2205,7 +2200,7 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
2205 return PTR_ERR(chain); 2200 return PTR_ERR(chain);
2206 } 2201 }
2207 2202
2208 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 2203 nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
2209 2204
2210 if (chain) { 2205 if (chain) {
2211 if (nla[NFTA_RULE_HANDLE]) { 2206 if (nla[NFTA_RULE_HANDLE]) {
@@ -2344,12 +2339,11 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
2344 [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, 2339 [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 },
2345}; 2340};
2346 2341
2347static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, 2342static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2348 const struct sk_buff *skb, 2343 const struct sk_buff *skb,
2349 const struct nlmsghdr *nlh, 2344 const struct nlmsghdr *nlh,
2350 const struct nlattr * const nla[]) 2345 const struct nlattr * const nla[])
2351{ 2346{
2352 struct net *net = sock_net(skb->sk);
2353 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2347 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2354 struct nft_af_info *afi = NULL; 2348 struct nft_af_info *afi = NULL;
2355 struct nft_table *table = NULL; 2349 struct nft_table *table = NULL;
@@ -2371,7 +2365,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
2371 return -ENOENT; 2365 return -ENOENT;
2372 } 2366 }
2373 2367
2374 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 2368 nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
2375 return 0; 2369 return 0;
2376} 2370}
2377 2371
@@ -2623,6 +2617,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
2623 const struct nlmsghdr *nlh, 2617 const struct nlmsghdr *nlh,
2624 const struct nlattr * const nla[]) 2618 const struct nlattr * const nla[])
2625{ 2619{
2620 struct net *net = sock_net(skb->sk);
2626 const struct nft_set *set; 2621 const struct nft_set *set;
2627 struct nft_ctx ctx; 2622 struct nft_ctx ctx;
2628 struct sk_buff *skb2; 2623 struct sk_buff *skb2;
@@ -2630,7 +2625,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
2630 int err; 2625 int err;
2631 2626
2632 /* Verify existence before starting dump */ 2627 /* Verify existence before starting dump */
2633 err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); 2628 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla);
2634 if (err < 0) 2629 if (err < 0)
2635 return err; 2630 return err;
2636 2631
@@ -2693,14 +2688,13 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
2693 return 0; 2688 return 0;
2694} 2689}
2695 2690
2696static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, 2691static int nf_tables_newset(struct net *net, struct sock *nlsk,
2697 const struct nlmsghdr *nlh, 2692 struct sk_buff *skb, const struct nlmsghdr *nlh,
2698 const struct nlattr * const nla[]) 2693 const struct nlattr * const nla[])
2699{ 2694{
2700 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2695 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2701 const struct nft_set_ops *ops; 2696 const struct nft_set_ops *ops;
2702 struct nft_af_info *afi; 2697 struct nft_af_info *afi;
2703 struct net *net = sock_net(skb->sk);
2704 struct nft_table *table; 2698 struct nft_table *table;
2705 struct nft_set *set; 2699 struct nft_set *set;
2706 struct nft_ctx ctx; 2700 struct nft_ctx ctx;
@@ -2798,7 +2792,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2798 if (IS_ERR(table)) 2792 if (IS_ERR(table))
2799 return PTR_ERR(table); 2793 return PTR_ERR(table);
2800 2794
2801 nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); 2795 nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
2802 2796
2803 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); 2797 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
2804 if (IS_ERR(set)) { 2798 if (IS_ERR(set)) {
@@ -2882,8 +2876,8 @@ static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set
2882 nft_set_destroy(set); 2876 nft_set_destroy(set);
2883} 2877}
2884 2878
2885static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, 2879static int nf_tables_delset(struct net *net, struct sock *nlsk,
2886 const struct nlmsghdr *nlh, 2880 struct sk_buff *skb, const struct nlmsghdr *nlh,
2887 const struct nlattr * const nla[]) 2881 const struct nlattr * const nla[])
2888{ 2882{
2889 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2883 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -2896,7 +2890,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2896 if (nla[NFTA_SET_TABLE] == NULL) 2890 if (nla[NFTA_SET_TABLE] == NULL)
2897 return -EINVAL; 2891 return -EINVAL;
2898 2892
2899 err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); 2893 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla);
2900 if (err < 0) 2894 if (err < 0)
2901 return err; 2895 return err;
2902 2896
@@ -3024,7 +3018,7 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
3024 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, 3018 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
3025}; 3019};
3026 3020
3027static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, 3021static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
3028 const struct sk_buff *skb, 3022 const struct sk_buff *skb,
3029 const struct nlmsghdr *nlh, 3023 const struct nlmsghdr *nlh,
3030 const struct nlattr * const nla[], 3024 const struct nlattr * const nla[],
@@ -3033,7 +3027,6 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
3033 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3027 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3034 struct nft_af_info *afi; 3028 struct nft_af_info *afi;
3035 struct nft_table *table; 3029 struct nft_table *table;
3036 struct net *net = sock_net(skb->sk);
3037 3030
3038 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); 3031 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
3039 if (IS_ERR(afi)) 3032 if (IS_ERR(afi))
@@ -3045,7 +3038,7 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
3045 if (!trans && (table->flags & NFT_TABLE_INACTIVE)) 3038 if (!trans && (table->flags & NFT_TABLE_INACTIVE))
3046 return -ENOENT; 3039 return -ENOENT;
3047 3040
3048 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 3041 nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
3049 return 0; 3042 return 0;
3050} 3043}
3051 3044
@@ -3135,6 +3128,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3135 3128
3136static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) 3129static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3137{ 3130{
3131 struct net *net = sock_net(skb->sk);
3138 const struct nft_set *set; 3132 const struct nft_set *set;
3139 struct nft_set_dump_args args; 3133 struct nft_set_dump_args args;
3140 struct nft_ctx ctx; 3134 struct nft_ctx ctx;
@@ -3150,8 +3144,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3150 if (err < 0) 3144 if (err < 0)
3151 return err; 3145 return err;
3152 3146
3153 err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla, 3147 err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh,
3154 false); 3148 (void *)nla, false);
3155 if (err < 0) 3149 if (err < 0)
3156 return err; 3150 return err;
3157 3151
@@ -3212,11 +3206,12 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
3212 const struct nlmsghdr *nlh, 3206 const struct nlmsghdr *nlh,
3213 const struct nlattr * const nla[]) 3207 const struct nlattr * const nla[])
3214{ 3208{
3209 struct net *net = sock_net(skb->sk);
3215 const struct nft_set *set; 3210 const struct nft_set *set;
3216 struct nft_ctx ctx; 3211 struct nft_ctx ctx;
3217 int err; 3212 int err;
3218 3213
3219 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); 3214 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false);
3220 if (err < 0) 3215 if (err < 0)
3221 return err; 3216 return err;
3222 3217
@@ -3528,11 +3523,10 @@ err1:
3528 return err; 3523 return err;
3529} 3524}
3530 3525
3531static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, 3526static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3532 const struct nlmsghdr *nlh, 3527 struct sk_buff *skb, const struct nlmsghdr *nlh,
3533 const struct nlattr * const nla[]) 3528 const struct nlattr * const nla[])
3534{ 3529{
3535 struct net *net = sock_net(skb->sk);
3536 const struct nlattr *attr; 3530 const struct nlattr *attr;
3537 struct nft_set *set; 3531 struct nft_set *set;
3538 struct nft_ctx ctx; 3532 struct nft_ctx ctx;
@@ -3541,7 +3535,7 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
3541 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3535 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
3542 return -EINVAL; 3536 return -EINVAL;
3543 3537
3544 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); 3538 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, true);
3545 if (err < 0) 3539 if (err < 0)
3546 return err; 3540 return err;
3547 3541
@@ -3623,8 +3617,8 @@ err1:
3623 return err; 3617 return err;
3624} 3618}
3625 3619
3626static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, 3620static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
3627 const struct nlmsghdr *nlh, 3621 struct sk_buff *skb, const struct nlmsghdr *nlh,
3628 const struct nlattr * const nla[]) 3622 const struct nlattr * const nla[])
3629{ 3623{
3630 const struct nlattr *attr; 3624 const struct nlattr *attr;
@@ -3635,7 +3629,7 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
3635 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3629 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
3636 return -EINVAL; 3630 return -EINVAL;
3637 3631
3638 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); 3632 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false);
3639 if (err < 0) 3633 if (err < 0)
3640 return err; 3634 return err;
3641 3635
@@ -4030,7 +4024,8 @@ static int nf_tables_abort(struct sk_buff *skb)
4030 struct nft_trans *trans, *next; 4024 struct nft_trans *trans, *next;
4031 struct nft_trans_elem *te; 4025 struct nft_trans_elem *te;
4032 4026
4033 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { 4027 list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list,
4028 list) {
4034 switch (trans->msg_type) { 4029 switch (trans->msg_type) {
4035 case NFT_MSG_NEWTABLE: 4030 case NFT_MSG_NEWTABLE:
4036 if (nft_trans_table_update(trans)) { 4031 if (nft_trans_table_update(trans)) {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 46453ab318db..77afe913d03d 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -295,8 +295,6 @@ replay:
295 if (!skb) 295 if (!skb)
296 return netlink_ack(oskb, nlh, -ENOMEM); 296 return netlink_ack(oskb, nlh, -ENOMEM);
297 297
298 skb->sk = oskb->sk;
299
300 nfnl_lock(subsys_id); 298 nfnl_lock(subsys_id);
301 ss = rcu_dereference_protected(table[subsys_id].subsys, 299 ss = rcu_dereference_protected(table[subsys_id].subsys,
302 lockdep_is_held(&table[subsys_id].mutex)); 300 lockdep_is_held(&table[subsys_id].mutex));
@@ -381,7 +379,7 @@ replay:
381 goto ack; 379 goto ack;
382 380
383 if (nc->call_batch) { 381 if (nc->call_batch) {
384 err = nc->call_batch(net->nfnl, skb, nlh, 382 err = nc->call_batch(net, net->nfnl, skb, nlh,
385 (const struct nlattr **)cda); 383 (const struct nlattr **)cda);
386 } 384 }
387 385
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7d81d280cb4f..861c6615253b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -365,8 +365,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
365 break; 365 break;
366 } 366 }
367 367
368 nfnl_ct = rcu_dereference(nfnl_ct_hook);
369
368 if (queue->flags & NFQA_CFG_F_CONNTRACK) { 370 if (queue->flags & NFQA_CFG_F_CONNTRACK) {
369 nfnl_ct = rcu_dereference(nfnl_ct_hook);
370 if (nfnl_ct != NULL) { 371 if (nfnl_ct != NULL) {
371 ct = nfnl_ct->get_ct(entskb, &ctinfo); 372 ct = nfnl_ct->get_ct(entskb, &ctinfo);
372 if (ct != NULL) 373 if (ct != NULL)
@@ -1064,9 +1065,10 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
1064 if (entry == NULL) 1065 if (entry == NULL)
1065 return -ENOENT; 1066 return -ENOENT;
1066 1067
1068 /* rcu lock already held from nfnl->call_rcu. */
1069 nfnl_ct = rcu_dereference(nfnl_ct_hook);
1070
1067 if (nfqa[NFQA_CT]) { 1071 if (nfqa[NFQA_CT]) {
1068 /* rcu lock already held from nfnl->call_rcu. */
1069 nfnl_ct = rcu_dereference(nfnl_ct_hook);
1070 if (nfnl_ct != NULL) 1072 if (nfnl_ct != NULL)
1071 ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); 1073 ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
1072 } 1074 }
@@ -1417,6 +1419,7 @@ static int __init nfnetlink_queue_init(void)
1417 1419
1418cleanup_netlink_notifier: 1420cleanup_netlink_notifier:
1419 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1421 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1422 unregister_pernet_subsys(&nfnl_queue_net_ops);
1420out: 1423out:
1421 return status; 1424 return status;
1422} 1425}
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c2cc11168fd5..3e8892216f94 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -53,6 +53,8 @@ struct ovs_conntrack_info {
53 struct md_labels labels; 53 struct md_labels labels;
54}; 54};
55 55
56static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
57
56static u16 key_to_nfproto(const struct sw_flow_key *key) 58static u16 key_to_nfproto(const struct sw_flow_key *key)
57{ 59{
58 switch (ntohs(key->eth.type)) { 60 switch (ntohs(key->eth.type)) {
@@ -141,6 +143,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
141 * previously sent the packet to conntrack via the ct action. 143 * previously sent the packet to conntrack via the ct action.
142 */ 144 */
143static void ovs_ct_update_key(const struct sk_buff *skb, 145static void ovs_ct_update_key(const struct sk_buff *skb,
146 const struct ovs_conntrack_info *info,
144 struct sw_flow_key *key, bool post_ct) 147 struct sw_flow_key *key, bool post_ct)
145{ 148{
146 const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; 149 const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
@@ -158,13 +161,15 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
158 zone = nf_ct_zone(ct); 161 zone = nf_ct_zone(ct);
159 } else if (post_ct) { 162 } else if (post_ct) {
160 state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID; 163 state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
164 if (info)
165 zone = &info->zone;
161 } 166 }
162 __ovs_ct_update_key(key, state, zone, ct); 167 __ovs_ct_update_key(key, state, zone, ct);
163} 168}
164 169
165void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) 170void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
166{ 171{
167 ovs_ct_update_key(skb, key, false); 172 ovs_ct_update_key(skb, NULL, key, false);
168} 173}
169 174
170int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) 175int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
@@ -418,7 +423,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
418 } 423 }
419 } 424 }
420 425
421 ovs_ct_update_key(skb, key, true); 426 ovs_ct_update_key(skb, info, key, true);
422 427
423 return 0; 428 return 0;
424} 429}
@@ -708,7 +713,7 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
708 nf_conntrack_get(&ct_info.ct->ct_general); 713 nf_conntrack_get(&ct_info.ct->ct_general);
709 return 0; 714 return 0;
710err_free_ct: 715err_free_ct:
711 nf_conntrack_free(ct_info.ct); 716 __ovs_ct_free_action(&ct_info);
712 return err; 717 return err;
713} 718}
714 719
@@ -750,6 +755,11 @@ void ovs_ct_free_action(const struct nlattr *a)
750{ 755{
751 struct ovs_conntrack_info *ct_info = nla_data(a); 756 struct ovs_conntrack_info *ct_info = nla_data(a);
752 757
758 __ovs_ct_free_action(ct_info);
759}
760
761static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
762{
753 if (ct_info->helper) 763 if (ct_info->helper)
754 module_put(ct_info->helper->me); 764 module_put(ct_info->helper->me);
755 if (ct_info->ct) 765 if (ct_info->ct)
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index b41e9ea2ffff..f53bf3b6558b 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -49,7 +49,6 @@
49struct rfkill { 49struct rfkill {
50 spinlock_t lock; 50 spinlock_t lock;
51 51
52 const char *name;
53 enum rfkill_type type; 52 enum rfkill_type type;
54 53
55 unsigned long state; 54 unsigned long state;
@@ -73,6 +72,7 @@ struct rfkill {
73 struct delayed_work poll_work; 72 struct delayed_work poll_work;
74 struct work_struct uevent_work; 73 struct work_struct uevent_work;
75 struct work_struct sync_work; 74 struct work_struct sync_work;
75 char name[];
76}; 76};
77#define to_rfkill(d) container_of(d, struct rfkill, dev) 77#define to_rfkill(d) container_of(d, struct rfkill, dev)
78 78
@@ -876,14 +876,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
876 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) 876 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
877 return NULL; 877 return NULL;
878 878
879 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); 879 rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
880 if (!rfkill) 880 if (!rfkill)
881 return NULL; 881 return NULL;
882 882
883 spin_lock_init(&rfkill->lock); 883 spin_lock_init(&rfkill->lock);
884 INIT_LIST_HEAD(&rfkill->node); 884 INIT_LIST_HEAD(&rfkill->node);
885 rfkill->type = type; 885 rfkill->type = type;
886 rfkill->name = name; 886 strcpy(rfkill->name, name);
887 rfkill->ops = ops; 887 rfkill->ops = ops;
888 rfkill->data = ops_data; 888 rfkill->data = ops_data;
889 889
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7ec667dd4ce1..b5c2cf2aa6d4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -950,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
950 } 950 }
951 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); 951 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
952 if (!netif_is_multiqueue(dev)) 952 if (!netif_is_multiqueue(dev))
953 sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 953 sch->flags |= TCQ_F_ONETXQUEUE;
954 } 954 }
955 955
956 sch->handle = handle; 956 sch->handle = handle;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index acb45b8c2a9d..ec529121f38a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -323,14 +323,13 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
323 } 323 }
324 } 324 }
325 } 325 }
326 rcu_read_unlock();
327
328 if (baddr) { 326 if (baddr) {
329 fl6->saddr = baddr->v6.sin6_addr; 327 fl6->saddr = baddr->v6.sin6_addr;
330 fl6->fl6_sport = baddr->v6.sin6_port; 328 fl6->fl6_sport = baddr->v6.sin6_port;
331 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 329 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
332 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 330 dst = ip6_dst_lookup_flow(sk, fl6, final_p);
333 } 331 }
332 rcu_read_unlock();
334 333
335out: 334out:
336 if (!IS_ERR_OR_NULL(dst)) { 335 if (!IS_ERR_OR_NULL(dst)) {
@@ -642,6 +641,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
642 struct sock *newsk; 641 struct sock *newsk;
643 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 642 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
644 struct sctp6_sock *newsctp6sk; 643 struct sctp6_sock *newsctp6sk;
644 struct ipv6_txoptions *opt;
645 645
646 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); 646 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
647 if (!newsk) 647 if (!newsk)
@@ -661,6 +661,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
661 661
662 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 662 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
663 663
664 rcu_read_lock();
665 opt = rcu_dereference(np->opt);
666 if (opt)
667 opt = ipv6_dup_options(newsk, opt);
668 RCU_INIT_POINTER(newnp->opt, opt);
669 rcu_read_unlock();
670
664 /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() 671 /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
665 * and getpeername(). 672 * and getpeername().
666 */ 673 */
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7e8f0a117106..c0380cfb16ae 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -324,6 +324,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
324 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : 324 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
325 "illegal chunk"); 325 "illegal chunk");
326 326
327 sctp_chunk_hold(chunk);
327 sctp_outq_tail_data(q, chunk); 328 sctp_outq_tail_data(q, chunk);
328 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 329 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
329 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); 330 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
@@ -1251,6 +1252,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1251 */ 1252 */
1252 1253
1253 sack_a_rwnd = ntohl(sack->a_rwnd); 1254 sack_a_rwnd = ntohl(sack->a_rwnd);
1255 asoc->peer.zero_window_announced = !sack_a_rwnd;
1254 outstanding = q->outstanding_bytes; 1256 outstanding = q->outstanding_bytes;
1255 1257
1256 if (outstanding < sack_a_rwnd) 1258 if (outstanding < sack_a_rwnd)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 763e06a55155..5d6a03fad378 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1652,7 +1652,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1652 1652
1653 /* Set an expiration time for the cookie. */ 1653 /* Set an expiration time for the cookie. */
1654 cookie->c.expiration = ktime_add(asoc->cookie_life, 1654 cookie->c.expiration = ktime_add(asoc->cookie_life,
1655 ktime_get()); 1655 ktime_get_real());
1656 1656
1657 /* Copy the peer's init packet. */ 1657 /* Copy the peer's init packet. */
1658 memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, 1658 memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
@@ -1780,7 +1780,7 @@ no_hmac:
1780 if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) 1780 if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
1781 kt = skb_get_ktime(skb); 1781 kt = skb_get_ktime(skb);
1782 else 1782 else
1783 kt = ktime_get(); 1783 kt = ktime_get_real();
1784 1784
1785 if (!asoc && ktime_before(bear_cookie->expiration, kt)) { 1785 if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
1786 /* 1786 /*
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 6f46aa16cb76..cd34a4a34065 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5412,7 +5412,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
5412 SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); 5412 SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
5413 5413
5414 if (asoc->overall_error_count >= asoc->max_retrans) { 5414 if (asoc->overall_error_count >= asoc->max_retrans) {
5415 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { 5415 if (asoc->peer.zero_window_announced &&
5416 asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
5416 /* 5417 /*
5417 * We are here likely because the receiver had its rwnd 5418 * We are here likely because the receiver had its rwnd
5418 * closed for a while and we have not been able to 5419 * closed for a while and we have not been able to
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 03c8256063ec..9b6cc6de80d8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1952,8 +1952,6 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1952 1952
1953 /* Now send the (possibly) fragmented message. */ 1953 /* Now send the (possibly) fragmented message. */
1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1955 sctp_chunk_hold(chunk);
1956
1957 /* Do accounting for the write space. */ 1955 /* Do accounting for the write space. */
1958 sctp_set_owner_w(chunk); 1956 sctp_set_owner_w(chunk);
1959 1957
@@ -1966,15 +1964,13 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1966 * breaks. 1964 * breaks.
1967 */ 1965 */
1968 err = sctp_primitive_SEND(net, asoc, datamsg); 1966 err = sctp_primitive_SEND(net, asoc, datamsg);
1967 sctp_datamsg_put(datamsg);
1969 /* Did the lower layer accept the chunk? */ 1968 /* Did the lower layer accept the chunk? */
1970 if (err) { 1969 if (err)
1971 sctp_datamsg_free(datamsg);
1972 goto out_free; 1970 goto out_free;
1973 }
1974 1971
1975 pr_debug("%s: we sent primitively\n", __func__); 1972 pr_debug("%s: we sent primitively\n", __func__);
1976 1973
1977 sctp_datamsg_put(datamsg);
1978 err = msg_len; 1974 err = msg_len;
1979 1975
1980 if (unlikely(wait_connect)) { 1976 if (unlikely(wait_connect)) {
@@ -7167,6 +7163,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
7167 newsk->sk_type = sk->sk_type; 7163 newsk->sk_type = sk->sk_type;
7168 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 7164 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
7169 newsk->sk_flags = sk->sk_flags; 7165 newsk->sk_flags = sk->sk_flags;
7166 newsk->sk_tsflags = sk->sk_tsflags;
7170 newsk->sk_no_check_tx = sk->sk_no_check_tx; 7167 newsk->sk_no_check_tx = sk->sk_no_check_tx;
7171 newsk->sk_no_check_rx = sk->sk_no_check_rx; 7168 newsk->sk_no_check_rx = sk->sk_no_check_rx;
7172 newsk->sk_reuse = sk->sk_reuse; 7169 newsk->sk_reuse = sk->sk_reuse;
@@ -7199,6 +7196,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
7199 newinet->mc_ttl = 1; 7196 newinet->mc_ttl = 1;
7200 newinet->mc_index = 0; 7197 newinet->mc_index = 0;
7201 newinet->mc_list = NULL; 7198 newinet->mc_list = NULL;
7199
7200 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
7201 net_enable_timestamp();
7202} 7202}
7203 7203
7204static inline void sctp_copy_descendant(struct sock *sk_to, 7204static inline void sctp_copy_descendant(struct sock *sk_to,
diff --git a/net/socket.c b/net/socket.c
index 456fadb3d819..29822d6dd91e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1695,6 +1695,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1695 msg.msg_name = addr ? (struct sockaddr *)&address : NULL; 1695 msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
1696 /* We assume all kernel code knows the size of sockaddr_storage */ 1696 /* We assume all kernel code knows the size of sockaddr_storage */
1697 msg.msg_namelen = 0; 1697 msg.msg_namelen = 0;
1698 msg.msg_iocb = NULL;
1698 if (sock->file->f_flags & O_NONBLOCK) 1699 if (sock->file->f_flags & O_NONBLOCK)
1699 flags |= MSG_DONTWAIT; 1700 flags |= MSG_DONTWAIT;
1700 err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); 1701 err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 95f82d8d4888..229956bf8457 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -353,20 +353,12 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
353{ 353{
354 struct rpc_xprt *xprt = req->rq_xprt; 354 struct rpc_xprt *xprt = req->rq_xprt;
355 struct svc_serv *bc_serv = xprt->bc_serv; 355 struct svc_serv *bc_serv = xprt->bc_serv;
356 struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
357 356
358 spin_lock(&xprt->bc_pa_lock); 357 spin_lock(&xprt->bc_pa_lock);
359 list_del(&req->rq_bc_pa_list); 358 list_del(&req->rq_bc_pa_list);
360 xprt_dec_alloc_count(xprt, 1); 359 xprt_dec_alloc_count(xprt, 1);
361 spin_unlock(&xprt->bc_pa_lock); 360 spin_unlock(&xprt->bc_pa_lock);
362 361
363 if (copied <= rq_rcv_buf->head[0].iov_len) {
364 rq_rcv_buf->head[0].iov_len = copied;
365 rq_rcv_buf->page_len = 0;
366 } else {
367 rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
368 }
369
370 req->rq_private_buf.len = copied; 362 req->rq_private_buf.len = copied;
371 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 363 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
372 364
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f14f24ee9983..73ad57a59989 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -250,11 +250,11 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
250} 250}
251EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); 251EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
252 252
253static int rpc_wait_bit_killable(struct wait_bit_key *key) 253static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
254{ 254{
255 if (fatal_signal_pending(current))
256 return -ERESTARTSYS;
257 freezable_schedule_unsafe(); 255 freezable_schedule_unsafe();
256 if (signal_pending_state(mode, current))
257 return -ERESTARTSYS;
258 return 0; 258 return 0;
259} 259}
260 260
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7fccf9675df8..cc9852897395 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1363,7 +1363,19 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1363 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); 1363 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
1364 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); 1364 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1365 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); 1365 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1366
1367 /* Adjust the argument buffer length */
1366 rqstp->rq_arg.len = req->rq_private_buf.len; 1368 rqstp->rq_arg.len = req->rq_private_buf.len;
1369 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1370 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1371 rqstp->rq_arg.page_len = 0;
1372 } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
1373 rqstp->rq_arg.page_len)
1374 rqstp->rq_arg.page_len = rqstp->rq_arg.len -
1375 rqstp->rq_arg.head[0].iov_len;
1376 else
1377 rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
1378 rqstp->rq_arg.page_len;
1367 1379
1368 /* reset result send buffer "put" position */ 1380 /* reset result send buffer "put" position */
1369 resv->iov_len = 0; 1381 resv->iov_len = 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 45aebd966978..a4631477cedf 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2256,14 +2256,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2256 /* Lock the socket to prevent queue disordering 2256 /* Lock the socket to prevent queue disordering
2257 * while sleeps in memcpy_tomsg 2257 * while sleeps in memcpy_tomsg
2258 */ 2258 */
2259 err = mutex_lock_interruptible(&u->readlock); 2259 mutex_lock(&u->readlock);
2260 if (unlikely(err)) {
2261 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
2262 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2263 */
2264 err = noblock ? -EAGAIN : -ERESTARTSYS;
2265 goto out;
2266 }
2267 2260
2268 if (flags & MSG_PEEK) 2261 if (flags & MSG_PEEK)
2269 skip = sk_peek_offset(sk, flags); 2262 skip = sk_peek_offset(sk, flags);
@@ -2307,12 +2300,12 @@ again:
2307 timeo = unix_stream_data_wait(sk, timeo, last, 2300 timeo = unix_stream_data_wait(sk, timeo, last,
2308 last_len); 2301 last_len);
2309 2302
2310 if (signal_pending(current) || 2303 if (signal_pending(current)) {
2311 mutex_lock_interruptible(&u->readlock)) {
2312 err = sock_intr_errno(timeo); 2304 err = sock_intr_errno(timeo);
2313 goto out; 2305 goto out;
2314 } 2306 }
2315 2307
2308 mutex_lock(&u->readlock);
2316 continue; 2309 continue;
2317unlock: 2310unlock:
2318 unix_state_unlock(sk); 2311 unix_state_unlock(sk);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c71e274c810a..75b0d23ee882 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7941,8 +7941,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
7941 if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { 7941 if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
7942 if (!(rdev->wiphy.features & 7942 if (!(rdev->wiphy.features &
7943 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || 7943 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
7944 !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) 7944 !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) {
7945 kzfree(connkeys);
7945 return -EINVAL; 7946 return -EINVAL;
7947 }
7946 connect.flags |= ASSOC_REQ_USE_RRM; 7948 connect.flags |= ASSOC_REQ_USE_RRM;
7947 } 7949 }
7948 7950
@@ -9503,6 +9505,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
9503 if (new_triggers.tcp && new_triggers.tcp->sock) 9505 if (new_triggers.tcp && new_triggers.tcp->sock)
9504 sock_release(new_triggers.tcp->sock); 9506 sock_release(new_triggers.tcp->sock);
9505 kfree(new_triggers.tcp); 9507 kfree(new_triggers.tcp);
9508 kfree(new_triggers.nd_config);
9506 return err; 9509 return err;
9507} 9510}
9508#endif 9511#endif
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2e8d6f39ed56..06d050da0d94 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3029,6 +3029,7 @@ int set_regdom(const struct ieee80211_regdomain *rd,
3029 break; 3029 break;
3030 default: 3030 default:
3031 WARN(1, "invalid initiator %d\n", lr->initiator); 3031 WARN(1, "invalid initiator %d\n", lr->initiator);
3032 kfree(rd);
3032 return -EINVAL; 3033 return -EINVAL;
3033 } 3034 }
3034 3035
@@ -3221,8 +3222,10 @@ int __init regulatory_init(void)
3221 /* We always try to get an update for the static regdomain */ 3222 /* We always try to get an update for the static regdomain */
3222 err = regulatory_hint_core(cfg80211_world_regdom->alpha2); 3223 err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
3223 if (err) { 3224 if (err) {
3224 if (err == -ENOMEM) 3225 if (err == -ENOMEM) {
3226 platform_device_unregister(reg_pdev);
3225 return err; 3227 return err;
3228 }
3226 /* 3229 /*
3227 * N.B. kobject_uevent_env() can fail mainly for when we're out 3230 * N.B. kobject_uevent_env() can fail mainly for when we're out
3228 * memory which is handled and propagated appropriately above 3231 * memory which is handled and propagated appropriately above
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 09bfcbac63bb..948fa5560de5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -303,6 +303,14 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
303} 303}
304EXPORT_SYMBOL(xfrm_policy_alloc); 304EXPORT_SYMBOL(xfrm_policy_alloc);
305 305
306static void xfrm_policy_destroy_rcu(struct rcu_head *head)
307{
308 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
309
310 security_xfrm_policy_free(policy->security);
311 kfree(policy);
312}
313
306/* Destroy xfrm_policy: descendant resources must be released to this moment. */ 314/* Destroy xfrm_policy: descendant resources must be released to this moment. */
307 315
308void xfrm_policy_destroy(struct xfrm_policy *policy) 316void xfrm_policy_destroy(struct xfrm_policy *policy)
@@ -312,8 +320,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
312 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) 320 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
313 BUG(); 321 BUG();
314 322
315 security_xfrm_policy_free(policy->security); 323 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
316 kfree(policy);
317} 324}
318EXPORT_SYMBOL(xfrm_policy_destroy); 325EXPORT_SYMBOL(xfrm_policy_destroy);
319 326
@@ -1214,8 +1221,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1214 struct xfrm_policy *pol; 1221 struct xfrm_policy *pol;
1215 struct net *net = sock_net(sk); 1222 struct net *net = sock_net(sk);
1216 1223
1224 rcu_read_lock();
1217 read_lock_bh(&net->xfrm.xfrm_policy_lock); 1225 read_lock_bh(&net->xfrm.xfrm_policy_lock);
1218 if ((pol = sk->sk_policy[dir]) != NULL) { 1226 pol = rcu_dereference(sk->sk_policy[dir]);
1227 if (pol != NULL) {
1219 bool match = xfrm_selector_match(&pol->selector, fl, 1228 bool match = xfrm_selector_match(&pol->selector, fl,
1220 sk->sk_family); 1229 sk->sk_family);
1221 int err = 0; 1230 int err = 0;
@@ -1239,6 +1248,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1239 } 1248 }
1240out: 1249out:
1241 read_unlock_bh(&net->xfrm.xfrm_policy_lock); 1250 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1251 rcu_read_unlock();
1242 return pol; 1252 return pol;
1243} 1253}
1244 1254
@@ -1307,13 +1317,14 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1307#endif 1317#endif
1308 1318
1309 write_lock_bh(&net->xfrm.xfrm_policy_lock); 1319 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1310 old_pol = sk->sk_policy[dir]; 1320 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1311 sk->sk_policy[dir] = pol; 1321 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1312 if (pol) { 1322 if (pol) {
1313 pol->curlft.add_time = get_seconds(); 1323 pol->curlft.add_time = get_seconds();
1314 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); 1324 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1315 xfrm_sk_policy_link(pol, dir); 1325 xfrm_sk_policy_link(pol, dir);
1316 } 1326 }
1327 rcu_assign_pointer(sk->sk_policy[dir], pol);
1317 if (old_pol) { 1328 if (old_pol) {
1318 if (pol) 1329 if (pol)
1319 xfrm_policy_requeue(old_pol, pol); 1330 xfrm_policy_requeue(old_pol, pol);
@@ -1361,17 +1372,26 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1361 return newp; 1372 return newp;
1362} 1373}
1363 1374
1364int __xfrm_sk_clone_policy(struct sock *sk) 1375int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1365{ 1376{
1366 struct xfrm_policy *p0 = sk->sk_policy[0], 1377 const struct xfrm_policy *p;
1367 *p1 = sk->sk_policy[1]; 1378 struct xfrm_policy *np;
1379 int i, ret = 0;
1368 1380
1369 sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1381 rcu_read_lock();
1370 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1382 for (i = 0; i < 2; i++) {
1371 return -ENOMEM; 1383 p = rcu_dereference(osk->sk_policy[i]);
1372 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1384 if (p) {
1373 return -ENOMEM; 1385 np = clone_policy(p, i);
1374 return 0; 1386 if (unlikely(!np)) {
1387 ret = -ENOMEM;
1388 break;
1389 }
1390 rcu_assign_pointer(sk->sk_policy[i], np);
1391 }
1392 }
1393 rcu_read_unlock();
1394 return ret;
1375} 1395}
1376 1396
1377static int 1397static int
@@ -2198,6 +2218,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2198 xdst = NULL; 2218 xdst = NULL;
2199 route = NULL; 2219 route = NULL;
2200 2220
2221 sk = sk_const_to_full_sk(sk);
2201 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 2222 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2202 num_pols = 1; 2223 num_pols = 1;
2203 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 2224 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
@@ -2477,6 +2498,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2477 } 2498 }
2478 2499
2479 pol = NULL; 2500 pol = NULL;
2501 sk = sk_to_full_sk(sk);
2480 if (sk && sk->sk_policy[dir]) { 2502 if (sk && sk->sk_policy[dir]) {
2481 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2503 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2482 if (IS_ERR(pol)) { 2504 if (IS_ERR(pol)) {
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 1a10d8ac8162..dacf71a43ad4 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -62,7 +62,7 @@ vmlinux_link()
62 -Wl,--start-group \ 62 -Wl,--start-group \
63 ${KBUILD_VMLINUX_MAIN} \ 63 ${KBUILD_VMLINUX_MAIN} \
64 -Wl,--end-group \ 64 -Wl,--end-group \
65 -lutil ${1} 65 -lutil -lrt ${1}
66 rm -f linux 66 rm -f linux
67 fi 67 fi
68} 68}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 963f82430938..bff5c8b329d1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -355,6 +355,8 @@ enum {
355 ((pci)->device == 0x0d0c) || \ 355 ((pci)->device == 0x0d0c) || \
356 ((pci)->device == 0x160c)) 356 ((pci)->device == 0x160c))
357 357
358#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
359
358static char *driver_short_names[] = { 360static char *driver_short_names[] = {
359 [AZX_DRIVER_ICH] = "HDA Intel", 361 [AZX_DRIVER_ICH] = "HDA Intel",
360 [AZX_DRIVER_PCH] = "HDA Intel PCH", 362 [AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -506,15 +508,36 @@ static void azx_init_pci(struct azx *chip)
506 } 508 }
507} 509}
508 510
511/*
512 * In BXT-P A0, HD-Audio DMA requests is later than expected,
513 * and makes an audio stream sensitive to system latencies when
514 * 24/32 bits are playing.
515 * Adjusting threshold of DMA fifo to force the DMA request
516 * sooner to improve latency tolerance at the expense of power.
517 */
518static void bxt_reduce_dma_latency(struct azx *chip)
519{
520 u32 val;
521
522 val = azx_readl(chip, SKL_EM4L);
523 val &= (0x3 << 20);
524 azx_writel(chip, SKL_EM4L, val);
525}
526
509static void hda_intel_init_chip(struct azx *chip, bool full_reset) 527static void hda_intel_init_chip(struct azx *chip, bool full_reset)
510{ 528{
511 struct hdac_bus *bus = azx_bus(chip); 529 struct hdac_bus *bus = azx_bus(chip);
530 struct pci_dev *pci = chip->pci;
512 531
513 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 532 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
514 snd_hdac_set_codec_wakeup(bus, true); 533 snd_hdac_set_codec_wakeup(bus, true);
515 azx_init_chip(chip, full_reset); 534 azx_init_chip(chip, full_reset);
516 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 535 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
517 snd_hdac_set_codec_wakeup(bus, false); 536 snd_hdac_set_codec_wakeup(bus, false);
537
538 /* reduce dma latency to avoid noise */
539 if (IS_BROXTON(pci))
540 bxt_reduce_dma_latency(chip);
518} 541}
519 542
520/* calculate runtime delay from LPIB */ 543/* calculate runtime delay from LPIB */
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index f8a12ca477f1..4ef2259f88ca 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -778,7 +778,8 @@ static const struct hda_pintbl alienware_pincfgs[] = {
778}; 778};
779 779
780static const struct snd_pci_quirk ca0132_quirks[] = { 780static const struct snd_pci_quirk ca0132_quirks[] = {
781 SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15", QUIRK_ALIENWARE), 781 SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
782 SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
782 {} 783 {}
783}; 784};
784 785
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9bedf7c85e29..6c268dad143f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -111,6 +111,7 @@ struct alc_spec {
111 void (*power_hook)(struct hda_codec *codec); 111 void (*power_hook)(struct hda_codec *codec);
112#endif 112#endif
113 void (*shutup)(struct hda_codec *codec); 113 void (*shutup)(struct hda_codec *codec);
114 void (*reboot_notify)(struct hda_codec *codec);
114 115
115 int init_amp; 116 int init_amp;
116 int codec_variant; /* flag for other variants */ 117 int codec_variant; /* flag for other variants */
@@ -773,6 +774,25 @@ static inline void alc_shutup(struct hda_codec *codec)
773 snd_hda_shutup_pins(codec); 774 snd_hda_shutup_pins(codec);
774} 775}
775 776
777static void alc_reboot_notify(struct hda_codec *codec)
778{
779 struct alc_spec *spec = codec->spec;
780
781 if (spec && spec->reboot_notify)
782 spec->reboot_notify(codec);
783 else
784 alc_shutup(codec);
785}
786
787/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
788static void alc_d3_at_reboot(struct hda_codec *codec)
789{
790 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
791 snd_hda_codec_write(codec, codec->core.afg, 0,
792 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
793 msleep(10);
794}
795
776#define alc_free snd_hda_gen_free 796#define alc_free snd_hda_gen_free
777 797
778#ifdef CONFIG_PM 798#ifdef CONFIG_PM
@@ -818,7 +838,7 @@ static const struct hda_codec_ops alc_patch_ops = {
818 .suspend = alc_suspend, 838 .suspend = alc_suspend,
819 .check_power_status = snd_hda_gen_check_power_status, 839 .check_power_status = snd_hda_gen_check_power_status,
820#endif 840#endif
821 .reboot_notify = alc_shutup, 841 .reboot_notify = alc_reboot_notify,
822}; 842};
823 843
824 844
@@ -4198,6 +4218,8 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
4198 struct alc_spec *spec = codec->spec; 4218 struct alc_spec *spec = codec->spec;
4199 4219
4200 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 4220 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4221 spec->shutup = alc_no_shutup; /* reduce click noise */
4222 spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
4201 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 4223 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4202 codec->power_save_node = 0; /* avoid click noises */ 4224 codec->power_save_node = 0; /* avoid click noises */
4203 snd_hda_apply_pincfgs(codec, pincfgs); 4225 snd_hda_apply_pincfgs(codec, pincfgs);
@@ -4578,6 +4600,7 @@ enum {
4578 ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC, 4600 ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
4579 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 4601 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
4580 ALC292_FIXUP_TPT440_DOCK, 4602 ALC292_FIXUP_TPT440_DOCK,
4603 ALC292_FIXUP_TPT440,
4581 ALC283_FIXUP_BXBT2807_MIC, 4604 ALC283_FIXUP_BXBT2807_MIC,
4582 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, 4605 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
4583 ALC282_FIXUP_ASPIRE_V5_PINS, 4606 ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -4593,9 +4616,11 @@ enum {
4593 ALC288_FIXUP_DISABLE_AAMIX, 4616 ALC288_FIXUP_DISABLE_AAMIX,
4594 ALC292_FIXUP_DELL_E7X, 4617 ALC292_FIXUP_DELL_E7X,
4595 ALC292_FIXUP_DISABLE_AAMIX, 4618 ALC292_FIXUP_DISABLE_AAMIX,
4619 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
4596 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 4620 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
4597 ALC275_FIXUP_DELL_XPS, 4621 ALC275_FIXUP_DELL_XPS,
4598 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, 4622 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
4623 ALC293_FIXUP_LENOVO_SPK_NOISE,
4599}; 4624};
4600 4625
4601static const struct hda_fixup alc269_fixups[] = { 4626static const struct hda_fixup alc269_fixups[] = {
@@ -5050,6 +5075,12 @@ static const struct hda_fixup alc269_fixups[] = {
5050 .chained = true, 5075 .chained = true,
5051 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST 5076 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
5052 }, 5077 },
5078 [ALC292_FIXUP_TPT440] = {
5079 .type = HDA_FIXUP_FUNC,
5080 .v.func = alc_fixup_disable_aamix,
5081 .chained = true,
5082 .chain_id = ALC292_FIXUP_TPT440_DOCK,
5083 },
5053 [ALC283_FIXUP_BXBT2807_MIC] = { 5084 [ALC283_FIXUP_BXBT2807_MIC] = {
5054 .type = HDA_FIXUP_PINS, 5085 .type = HDA_FIXUP_PINS,
5055 .v.pins = (const struct hda_pintbl[]) { 5086 .v.pins = (const struct hda_pintbl[]) {
@@ -5149,6 +5180,12 @@ static const struct hda_fixup alc269_fixups[] = {
5149 .chained = true, 5180 .chained = true,
5150 .chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE 5181 .chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE
5151 }, 5182 },
5183 [ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK] = {
5184 .type = HDA_FIXUP_FUNC,
5185 .v.func = alc_fixup_disable_aamix,
5186 .chained = true,
5187 .chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE
5188 },
5152 [ALC292_FIXUP_DELL_E7X] = { 5189 [ALC292_FIXUP_DELL_E7X] = {
5153 .type = HDA_FIXUP_FUNC, 5190 .type = HDA_FIXUP_FUNC,
5154 .v.func = alc_fixup_dell_xps13, 5191 .v.func = alc_fixup_dell_xps13,
@@ -5187,6 +5224,12 @@ static const struct hda_fixup alc269_fixups[] = {
5187 .chained = true, 5224 .chained = true,
5188 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE 5225 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
5189 }, 5226 },
5227 [ALC293_FIXUP_LENOVO_SPK_NOISE] = {
5228 .type = HDA_FIXUP_FUNC,
5229 .v.func = alc_fixup_disable_aamix,
5230 .chained = true,
5231 .chain_id = ALC269_FIXUP_THINKPAD_ACPI
5232 },
5190}; 5233};
5191 5234
5192static const struct snd_pci_quirk alc269_fixup_tbl[] = { 5235static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5221,11 +5264,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5221 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5264 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5222 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5265 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5223 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5266 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5224 SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5267 SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5225 SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5268 SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5226 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5269 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5227 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5270 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5228 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5271 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5229 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5272 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5230 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5273 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5231 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5274 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5325,15 +5368,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5325 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), 5368 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
5326 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), 5369 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
5327 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), 5370 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
5328 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), 5371 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440),
5329 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), 5372 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
5330 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), 5373 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
5331 SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK), 5374 SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
5332 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 5375 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
5333 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), 5376 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
5334 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5377 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5378 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
5335 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), 5379 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
5336 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 5380 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
5381 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5337 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 5382 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
5338 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), 5383 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
5339 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5384 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5343,6 +5388,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5343 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), 5388 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
5344 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), 5389 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5345 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5390 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5391 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5346 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5392 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5347 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5393 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5348 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 5394 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5423,6 +5469,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5423 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, 5469 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
5424 {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"}, 5470 {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
5425 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, 5471 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
5472 {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
5426 {} 5473 {}
5427}; 5474};
5428 5475
@@ -6409,6 +6456,7 @@ static const struct hda_fixup alc662_fixups[] = {
6409static const struct snd_pci_quirk alc662_fixup_tbl[] = { 6456static const struct snd_pci_quirk alc662_fixup_tbl[] = {
6410 SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2), 6457 SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
6411 SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC), 6458 SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
6459 SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
6412 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), 6460 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
6413 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), 6461 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
6414 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), 6462 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 714df906249e..41c31db65039 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -741,10 +741,11 @@ snd_rme96_playback_setrate(struct rme96 *rme96,
741 { 741 {
742 /* change to/from double-speed: reset the DAC (if available) */ 742 /* change to/from double-speed: reset the DAC (if available) */
743 snd_rme96_reset_dac(rme96); 743 snd_rme96_reset_dac(rme96);
744 return 1; /* need to restore volume */
744 } else { 745 } else {
745 writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER); 746 writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
747 return 0;
746 } 748 }
747 return 0;
748} 749}
749 750
750static int 751static int
@@ -980,6 +981,7 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
980 struct rme96 *rme96 = snd_pcm_substream_chip(substream); 981 struct rme96 *rme96 = snd_pcm_substream_chip(substream);
981 struct snd_pcm_runtime *runtime = substream->runtime; 982 struct snd_pcm_runtime *runtime = substream->runtime;
982 int err, rate, dummy; 983 int err, rate, dummy;
984 bool apply_dac_volume = false;
983 985
984 runtime->dma_area = (void __force *)(rme96->iobase + 986 runtime->dma_area = (void __force *)(rme96->iobase +
985 RME96_IO_PLAY_BUFFER); 987 RME96_IO_PLAY_BUFFER);
@@ -993,24 +995,26 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
993 { 995 {
994 /* slave clock */ 996 /* slave clock */
995 if ((int)params_rate(params) != rate) { 997 if ((int)params_rate(params) != rate) {
996 spin_unlock_irq(&rme96->lock); 998 err = -EIO;
997 return -EIO; 999 goto error;
998 } 1000 }
999 } else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) { 1001 } else {
1000 spin_unlock_irq(&rme96->lock); 1002 err = snd_rme96_playback_setrate(rme96, params_rate(params));
1001 return err; 1003 if (err < 0)
1002 } 1004 goto error;
1003 if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) { 1005 apply_dac_volume = err > 0; /* need to restore volume later? */
1004 spin_unlock_irq(&rme96->lock);
1005 return err;
1006 } 1006 }
1007
1008 err = snd_rme96_playback_setformat(rme96, params_format(params));
1009 if (err < 0)
1010 goto error;
1007 snd_rme96_setframelog(rme96, params_channels(params), 1); 1011 snd_rme96_setframelog(rme96, params_channels(params), 1);
1008 if (rme96->capture_periodsize != 0) { 1012 if (rme96->capture_periodsize != 0) {
1009 if (params_period_size(params) << rme96->playback_frlog != 1013 if (params_period_size(params) << rme96->playback_frlog !=
1010 rme96->capture_periodsize) 1014 rme96->capture_periodsize)
1011 { 1015 {
1012 spin_unlock_irq(&rme96->lock); 1016 err = -EBUSY;
1013 return -EBUSY; 1017 goto error;
1014 } 1018 }
1015 } 1019 }
1016 rme96->playback_periodsize = 1020 rme96->playback_periodsize =
@@ -1021,9 +1025,16 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
1021 rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP); 1025 rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
1022 writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER); 1026 writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
1023 } 1027 }
1028
1029 err = 0;
1030 error:
1024 spin_unlock_irq(&rme96->lock); 1031 spin_unlock_irq(&rme96->lock);
1025 1032 if (apply_dac_volume) {
1026 return 0; 1033 usleep_range(3000, 10000);
1034 snd_rme96_apply_dac_volume(rme96);
1035 }
1036
1037 return err;
1027} 1038}
1028 1039
1029static int 1040static int
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index f494dced3c11..4f85757009b3 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1354,6 +1354,8 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1354 } 1354 }
1355 } 1355 }
1356 1356
1357 snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl);
1358
1357 range = (cval->max - cval->min) / cval->res; 1359 range = (cval->max - cval->min) / cval->res;
1358 /* 1360 /*
1359 * Are there devices with volume range more than 255? I use a bit more 1361 * Are there devices with volume range more than 255? I use a bit more
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 6a803eff87f7..ddca6547399b 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -348,13 +348,6 @@ static struct usbmix_name_map bose_companion5_map[] = {
348 { 0 } /* terminator */ 348 { 0 } /* terminator */
349}; 349};
350 350
351/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
352static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
353static struct usbmix_name_map dragonfly_1_2_map[] = {
354 { 7, NULL, .dB = &dragonfly_1_2_dB },
355 { 0 } /* terminator */
356};
357
358/* 351/*
359 * Control map entries 352 * Control map entries
360 */ 353 */
@@ -470,11 +463,6 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
470 .id = USB_ID(0x05a7, 0x1020), 463 .id = USB_ID(0x05a7, 0x1020),
471 .map = bose_companion5_map, 464 .map = bose_companion5_map,
472 }, 465 },
473 {
474 /* Dragonfly DAC 1.2 */
475 .id = USB_ID(0x21b4, 0x0081),
476 .map = dragonfly_1_2_map,
477 },
478 { 0 } /* terminator */ 466 { 0 } /* terminator */
479}; 467};
480 468
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index fe91184ce832..0ce888dceed0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -37,6 +37,7 @@
37#include <sound/control.h> 37#include <sound/control.h>
38#include <sound/hwdep.h> 38#include <sound/hwdep.h>
39#include <sound/info.h> 39#include <sound/info.h>
40#include <sound/tlv.h>
40 41
41#include "usbaudio.h" 42#include "usbaudio.h"
42#include "mixer.h" 43#include "mixer.h"
@@ -1825,3 +1826,39 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
1825 } 1826 }
1826} 1827}
1827 1828
1829static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
1830 struct snd_kcontrol *kctl)
1831{
1832 /* Approximation using 10 ranges based on output measurement on hw v1.2.
1833 * This seems close to the cubic mapping e.g. alsamixer uses. */
1834 static const DECLARE_TLV_DB_RANGE(scale,
1835 0, 1, TLV_DB_MINMAX_ITEM(-5300, -4970),
1836 2, 5, TLV_DB_MINMAX_ITEM(-4710, -4160),
1837 6, 7, TLV_DB_MINMAX_ITEM(-3884, -3710),
1838 8, 14, TLV_DB_MINMAX_ITEM(-3443, -2560),
1839 15, 16, TLV_DB_MINMAX_ITEM(-2475, -2324),
1840 17, 19, TLV_DB_MINMAX_ITEM(-2228, -2031),
1841 20, 26, TLV_DB_MINMAX_ITEM(-1910, -1393),
1842 27, 31, TLV_DB_MINMAX_ITEM(-1322, -1032),
1843 32, 40, TLV_DB_MINMAX_ITEM(-968, -490),
1844 41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
1845 );
1846
1847 usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n");
1848 kctl->tlv.p = scale;
1849 kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
1850 kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
1851}
1852
1853void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
1854 struct usb_mixer_elem_info *cval, int unitid,
1855 struct snd_kcontrol *kctl)
1856{
1857 switch (mixer->chip->usb_id) {
1858 case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
1859 if (unitid == 7 && cval->min == 0 && cval->max == 50)
1860 snd_dragonfly_quirk_db_scale(mixer, kctl);
1861 break;
1862 }
1863}
1864
diff --git a/sound/usb/mixer_quirks.h b/sound/usb/mixer_quirks.h
index bdbfab093816..177c329cd4dd 100644
--- a/sound/usb/mixer_quirks.h
+++ b/sound/usb/mixer_quirks.h
@@ -9,5 +9,9 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
9void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer, 9void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
10 int unitid); 10 int unitid);
11 11
12void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
13 struct usb_mixer_elem_info *cval, int unitid,
14 struct snd_kcontrol *kctl);
15
12#endif /* SND_USB_MIXER_QUIRKS_H */ 16#endif /* SND_USB_MIXER_QUIRKS_H */
13 17
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 7016ad898187..b6c0c8e3b450 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1125,6 +1125,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1125 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ 1125 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
1126 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1126 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
1127 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ 1127 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
1128 case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
1128 return true; 1129 return true;
1129 } 1130 }
1130 return false; 1131 return false;
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 0a3da64638ce..4db7d5691ba7 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -110,4 +110,10 @@ static inline void free_page(unsigned long addr)
110 (void) (&_min1 == &_min2); \ 110 (void) (&_min1 == &_min2); \
111 _min1 < _min2 ? _min1 : _min2; }) 111 _min1 < _min2 ? _min1 : _min2; })
112 112
113/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
114#define list_add_tail(a, b) do {} while (0)
115#define list_del(a) do {} while (0)
116#define list_for_each_entry(a, b, c) while (0)
117/* end of stubs */
118
113#endif /* KERNEL_H */ 119#endif /* KERNEL_H */
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index a3e07016a440..ee125e714053 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -3,12 +3,6 @@
3#include <linux/scatterlist.h> 3#include <linux/scatterlist.h>
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5 5
6/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
7#define list_add_tail(a, b) do {} while (0)
8#define list_del(a) do {} while (0)
9#define list_for_each_entry(a, b, c) while (0)
10/* end of stubs */
11
12struct virtio_device { 6struct virtio_device {
13 void *dev; 7 void *dev;
14 u64 features; 8 u64 features;
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 806d683ab107..57a6964a1e35 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -40,33 +40,39 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
40#define virtio_has_feature(dev, feature) \ 40#define virtio_has_feature(dev, feature) \
41 (__virtio_test_bit((dev), feature)) 41 (__virtio_test_bit((dev), feature))
42 42
43static inline bool virtio_is_little_endian(struct virtio_device *vdev)
44{
45 return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
46 virtio_legacy_is_little_endian();
47}
48
49/* Memory accessors */
43static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) 50static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
44{ 51{
45 return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 52 return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
46} 53}
47 54
48static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) 55static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
49{ 56{
50 return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 57 return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
51} 58}
52 59
53static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) 60static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
54{ 61{
55 return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 62 return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
56} 63}
57 64
58static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) 65static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
59{ 66{
60 return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 67 return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
61} 68}
62 69
63static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) 70static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
64{ 71{
65 return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 72 return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
66} 73}
67 74
68static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) 75static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
69{ 76{
70 return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 77 return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
71} 78}
72
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 65461f821a75..7a2f449bd85d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1114,7 +1114,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
1114 return true; 1114 return true;
1115 } 1115 }
1116 1116
1117 return dist_active_irq(vcpu); 1117 return vgic_irq_is_active(vcpu, map->virt_irq);
1118} 1118}
1119 1119
1120/* 1120/*